Skip to content

Commit

Permalink
validation for gpt enablement in kairon faq action (#885)
Browse files Browse the repository at this point in the history
1. Added chat_client_config.yml in tests/testing_data/validator/valid
2. Made few changes related to upload chat_client_config
3. Added validation that gpt is enabled before saving bot content and kairon faq action.
4. Added/Modified Unit and Integration Tests.
  • Loading branch information
maheshsattala authored Mar 28, 2023
1 parent f02dede commit e9ebfa9
Show file tree
Hide file tree
Showing 10 changed files with 360 additions and 140 deletions.
2 changes: 1 addition & 1 deletion kairon/importer/data_importer.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,5 +53,5 @@ def import_data(self):
self.validator.story_graph,
self.validator.intents,
self.validator.actions,
self.validator.chat_client_config,
self.validator.chat_client_config.get('config'),
self.overwrite, self.files_to_save)
3 changes: 2 additions & 1 deletion kairon/importer/validator/file_validator.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,8 @@ async def from_training_files(cls, training_data_paths: str, domain_path: str, c
domain_path=domain_path, training_data_paths=training_data_paths, config_file=config_path,
)
cls.actions = Utility.read_yaml(os.path.join(root_dir, 'actions.yml'))
cls.chat_client_config = Utility.read_yaml(os.path.join(root_dir, 'chat_client_config.yml'))
chat_client_config = Utility.read_yaml(os.path.join(root_dir, 'chat_client_config.yml'))
cls.chat_client_config = chat_client_config if chat_client_config else {}

return await TrainingDataValidator.from_importer(file_importer)
except YamlValidationException as e:
Expand Down
29 changes: 11 additions & 18 deletions kairon/shared/data/processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -1350,6 +1350,7 @@ def load_chat_client_config(self, bot: Text, user: Text):
config_dict = config.to_mongo().to_dict()
config_dict["config"].pop("headers", None)
config_dict["config"].pop("multilingual", None)
config_dict.pop("_id", None)
config_dict.pop("bot", None)
config_dict.pop("status", None)
config_dict.pop("user", None)
Expand Down Expand Up @@ -3405,6 +3406,8 @@ def save_data_without_event(self, data_home_dir: Text, bot: Text, user: Text, ov
error_summary['config'] = errors
if os.path.exists(chat_client_config_path):
chat_client_config = Utility.read_yaml(chat_client_config_path)
print(chat_client_config)
chat_client_config = chat_client_config["config"]

if not validation_failed and not error_summary.get('config'):
files_to_save = set()
Expand Down Expand Up @@ -3667,23 +3670,6 @@ def get_bot_settings(bot: Text, user: Text):
settings = BotSettings(bot=bot, user=user).save()
return settings

def add_rule_for_kairon_faq_action(self, bot: Text, user: Text):
    """Point the bot's nlu_fallback rule at the GPT LLM FAQ action.

    Rewrites the events of the first active rule containing an nlu_fallback
    user event so that the fallback triggers GPT_LLM_FAQ; if no such rule
    exists, creates a fresh DEFAULT_LLM_FALLBACK_RULE instead.

    :param bot: bot id
    :param user: user id recorded on a newly created rule
    """
    # Matcher: any rule whose events include the nlu_fallback user utterance.
    search_event = StoryEvents(name=DEFAULT_NLU_FALLBACK_INTENT_NAME, type=UserUttered.type_name)
    # Canonical sequence: rule snippet -> nlu_fallback intent -> GPT LLM FAQ action.
    events = [
        StoryEvents(name=RULE_SNIPPET_ACTION_NAME, type=ActionExecuted.type_name),
        StoryEvents(name=DEFAULT_NLU_FALLBACK_INTENT_NAME, type=UserUttered.type_name),
        StoryEvents(name=GPT_LLM_FAQ, type=ActionExecuted.type_name)
    ]
    try:
        # .first() returns None rather than raising, so raise explicitly to
        # funnel the "no existing rule" case into the creation branch below.
        rule = Rules.objects(bot=bot, status=True, events__match=search_event).first()
        if not rule:
            raise DoesNotExist('Rule with nlu_fallback event not found')
        rule.events = events
        rule.save()
    except DoesNotExist as e:
        logging.exception(e)
        # No existing fallback rule: create the default LLM fallback rule from scratch.
        Rules(block_name=DEFAULT_LLM_FALLBACK_RULE, bot=bot, user=user, start_checkpoints=[STORY_START], events=events).save()

def save_chat_client_config(self, config: dict, bot: Text, user: Text):
from kairon.shared.account.processor import AccountProcessor

Expand Down Expand Up @@ -4692,6 +4678,10 @@ def add_kairon_faq_action(self, request_data: dict, bot: Text, user: Text):
:param bot: bot id
:param user: user
"""
bot_settings = self.get_bot_settings(bot=bot, user=user)
if not bot_settings['enable_gpt_llm_faq']:
raise AppException('Faq feature is disabled for the bot! Please contact support.')

Utility.is_exist(KaironFaqAction, bot=bot, name__iexact=KAIRON_FAQ_ACTION, exp_message="Action already exists!")
request_data['bot'] = bot
request_data['user'] = user
Expand Down Expand Up @@ -5043,6 +5033,9 @@ def get_razorpay_action_config(self, bot: Text, with_doc_id: bool = True):
yield action

def save_content(self, content: Text, user: Text, bot: Text):
bot_settings = self.get_bot_settings(bot=bot, user=user)
if not bot_settings['enable_gpt_llm_faq']:
raise AppException('Faq feature is disabled for the bot! Please contact support.')
if len(content.split()) < 10:
raise AppException("Content should contain atleast 10 words.")

Expand All @@ -5064,7 +5057,7 @@ def update_content(self, content_id: str, content: Text, user: Text, bot: Text):
exp_message="Text already exists!")

try:
content_obj = BotContent.objects(bot=bot, user=user, id=content_id).get()
content_obj = BotContent.objects(bot=bot, id=content_id).get()
content_obj.data = content
content_obj.user = user
content_obj.timestamp = datetime.utcnow()
Expand Down
1 change: 0 additions & 1 deletion kairon/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,6 @@ def start_training(bot: str, user: str, token: str = None):
ModelProcessor.set_training_status(bot=bot, user=user, status=EVENT_STATUS.INPROGRESS.value)
settings = processor.get_bot_settings(bot, user)
if settings['enable_gpt_llm_faq']:
processor.add_rule_for_kairon_faq_action(bot, user)
llm = LLMFactory.get_instance(bot, "faq")
faqs = llm.train()
account = AccountProcessor.get_bot(bot)['account']
Expand Down
124 changes: 122 additions & 2 deletions tests/integration_test/action_service_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,13 @@
from kairon.shared.actions.data_objects import HttpActionConfig, SlotSetAction, Actions, FormValidationAction, \
EmailActionConfig, ActionServerLogs, GoogleSearchAction, JiraAction, ZendeskAction, PipedriveLeadsAction, SetSlots, \
HubspotFormsAction, HttpActionResponse, HttpActionRequestBody, SetSlotsFromResponse, CustomActionRequestParameters, \
KaironTwoStageFallbackAction, TwoStageFallbackTextualRecommendations, RazorpayAction
KaironTwoStageFallbackAction, TwoStageFallbackTextualRecommendations, RazorpayAction, KaironFaqAction
from kairon.shared.actions.models import ActionType, ActionParameterType
from kairon.shared.admin.constants import BotSecretType
from kairon.shared.admin.data_objects import BotSecrets
from kairon.shared.constants import KAIRON_USER_MSG_ENTITY
from kairon.shared.data.constant import KAIRON_TWO_STAGE_FALLBACK, FALLBACK_MESSAGE, GPT_LLM_FAQ, \
DEFAULT_NLU_FALLBACK_RESPONSE
DEFAULT_NLU_FALLBACK_RESPONSE, KAIRON_FAQ_ACTION, DEFAULT_SYSTEM_PROMPT, DEFAULT_CONTEXT_PROMPT
import numpy as np
from kairon.shared.data.data_objects import Slots, KeyVault, BotSettings
from kairon.shared.data.processor import MongoProcessor
Expand Down Expand Up @@ -5245,6 +5245,126 @@ async def mock_process_actions(*args, **kwargs):
self.assertEqual(response_json, {'error': "No registered action found for name 'Action Not Found Exception'.",
'action_name': 'Action Not Found Exception'})

@patch("kairon.shared.llm.gpt3.openai.ChatCompletion.create", autospec=True)
@patch("kairon.shared.llm.gpt3.openai.Embedding.create", autospec=True)
@patch("kairon.shared.llm.gpt3.Utility.execute_http_request", autospec=True)
def test_kairon_faq_response_action_with_bot_responses(self, mock_search, mock_embedding, mock_completion):
    """FAQ action with use_bot_responses=True must append prior bot utterances
    from the tracker events as an assistant message in the GPT prompt."""
    from kairon.shared.llm.gpt3 import GPT3FAQEmbedding
    from openai.util import convert_to_openai_object
    from openai.openai_response import OpenAIResponse
    from uuid6 import uuid7

    action_name = KAIRON_FAQ_ACTION
    bot = "5f50fd0a56b698ca10d35d2k"
    user = "udit.pandey"
    value = "keyvalue"  # dummy GPT secret value
    user_msg = "What kind of language is python?"
    bot_content = "Python is a high-level, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. Python is dynamically typed and garbage-collected."
    generated_text = "Python is dynamically typed, garbage-collected, high level, general purpose programming."

    # Stub the three external calls: embedding lookup, vector search, chat completion.
    embedding = list(np.random.random(GPT3FAQEmbedding.__embedding__))
    mock_embedding.return_value = convert_to_openai_object(OpenAIResponse({'data': [{'embedding': embedding}]}, {}))
    mock_completion.return_value = convert_to_openai_object(
        OpenAIResponse({'choices': [{'message': {'content': generated_text, 'role': 'assistant'}}]}, {}))
    mock_search.return_value = {
        'result': [{'id': uuid7().__str__(), 'score': 0.80, 'payload': {'content': bot_content}}]}
    # Fixture documents: action registration, GPT feature flag, FAQ config, GPT key.
    Actions(name=action_name, type=ActionType.kairon_faq_action.value, bot=bot, user=user).save()
    BotSettings(enable_gpt_llm_faq=True, bot=bot, user=user).save()
    KaironFaqAction(bot=bot, user=user, use_bot_responses=True, num_bot_responses=2).save()
    BotSecrets(secret_type=BotSecretType.gpt_key.value, value=value, bot=bot, user=user).save()

    request_object = json.load(open("tests/testing_data/actions/action-request.json"))
    request_object["tracker"]["slots"]["bot"] = bot
    request_object["next_action"] = action_name
    request_object["tracker"]["sender_id"] = user
    request_object["tracker"]["latest_message"]['text'] = user_msg
    # Two prior bot events — these should surface in the assistant prompt message.
    request_object['tracker']['events'] = [{"event": "bot", 'text': 'hello'},
                                           {'event': 'bot', "text": "how are you"}]

    response = self.fetch("/webhook", method="POST", body=json.dumps(request_object).encode('utf-8'))
    response_json = json.loads(response.body.decode("utf8"))
    self.assertEqual(response_json['events'], [
        {'event': 'slot', 'timestamp': None, 'name': 'kairon_action_response', 'value': generated_text}])
    self.assertEqual(
        response_json['responses'],
        [{'text': generated_text, 'buttons': [], 'elements': [], 'custom': {}, 'template': None,
          'response': None, 'image': None, 'attachment': None}
         ])
    # Verify the exact prompt sent to ChatCompletion, including the trailing
    # assistant message built from the two prior bot responses.
    assert mock_completion.call_args.kwargs[
               'messages'] == [
               {"role": "system",
                "content": DEFAULT_SYSTEM_PROMPT},
               {"role": "user",
                "content": f"{DEFAULT_CONTEXT_PROMPT} \n\nContext:\n{bot_content}\n\n Q: {user_msg}\n A:"},
               {"role": "assistant",
                "content": "hello\nhow are you\n"}
           ]

@patch("kairon.shared.llm.gpt3.openai.ChatCompletion.create", autospec=True)
@patch("kairon.shared.llm.gpt3.openai.Embedding.create", autospec=True)
@patch("kairon.shared.llm.gpt3.Utility.execute_http_request", autospec=True)
def test_kairon_faq_response_action_with_query_prompt(self, mock_search, mock_embedding, mock_completion):
    """FAQ action with use_query_prompt=True must first rephrase the user query
    via GPT, then answer using the rephrased query in the context prompt."""
    from kairon.shared.llm.gpt3 import GPT3FAQEmbedding
    from openai.util import convert_to_openai_object
    from openai.openai_response import OpenAIResponse
    from uuid6 import uuid7

    action_name = KAIRON_FAQ_ACTION
    bot = "5f50fd0a56b698ca10d35d2s"
    user = "udit.pandey"
    value = "keyvalue"  # dummy GPT secret value
    user_msg = "What kind of language is python?"
    bot_content = "Python is a high-level, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. Python is dynamically typed and garbage-collected."
    generated_text = "Python is dynamically typed, garbage-collected, high level, general purpose programming."
    query_prompt = "A programming language is a system of notation for writing computer programs.[1] Most programming languages are text-based formal languages, but they may also be graphical. They are a kind of computer language."
    rephrased_query = "Explain python is called high level programming language in laymen terms?"

    def mock_completion_for_query_prompt(*args, **kwargs):
        # First ChatCompletion call: query-rephrasing step.
        return convert_to_openai_object(
            OpenAIResponse({'choices': [{'message': {'content': rephrased_query, 'role': 'assistant'}}]}, {}))

    def mock_completion_for_answer(*args, **kwargs):
        # Second ChatCompletion call: final answer generation.
        return convert_to_openai_object(
            OpenAIResponse({'choices': [{'message': {'content': generated_text, 'role': 'assistant'}}]}, {}))

    # side_effect yields the rephrase response on the first call and the answer
    # on the second. Note: when side_effect is set it takes precedence over
    # return_value, so no return_value is assigned here (the original test set
    # one redundantly — it could never be consumed).
    mock_completion.side_effect = [mock_completion_for_query_prompt(), mock_completion_for_answer()]

    # Stub embedding lookup and vector search.
    embedding = list(np.random.random(GPT3FAQEmbedding.__embedding__))
    mock_embedding.return_value = convert_to_openai_object(OpenAIResponse({'data': [{'embedding': embedding}]}, {}))
    mock_search.return_value = {
        'result': [{'id': uuid7().__str__(), 'score': 0.80, 'payload': {'content': bot_content}}]}
    # Fixture documents: action registration, GPT feature flag, FAQ config, GPT key.
    Actions(name=action_name, type=ActionType.kairon_faq_action.value, bot=bot, user=user).save()
    BotSettings(enable_gpt_llm_faq=True, bot=bot, user=user).save()
    KaironFaqAction(bot=bot, user=user, use_query_prompt=True, query_prompt=query_prompt).save()
    BotSecrets(secret_type=BotSecretType.gpt_key.value, value=value, bot=bot, user=user).save()

    request_object = json.load(open("tests/testing_data/actions/action-request.json"))
    request_object["tracker"]["slots"]["bot"] = bot
    request_object["next_action"] = action_name
    request_object["tracker"]["sender_id"] = user
    request_object["tracker"]["latest_message"]['text'] = user_msg
    request_object['tracker']['events'] = [{"event": "bot", 'text': 'hello'},
                                           {'event': 'bot', "text": "how are you"}]

    response = self.fetch("/webhook", method="POST", body=json.dumps(request_object).encode('utf-8'))
    response_json = json.loads(response.body.decode("utf8"))
    self.assertEqual(response_json['events'], [
        {'event': 'slot', 'timestamp': None, 'name': 'kairon_action_response', 'value': generated_text}])
    self.assertEqual(
        response_json['responses'],
        [{'text': generated_text, 'buttons': [], 'elements': [], 'custom': {}, 'template': None,
          'response': None, 'image': None, 'attachment': None}
         ])
    # The final (answer) completion call must embed the REPHRASED query, not
    # the raw user message, in the context prompt.
    assert mock_completion.call_args.kwargs[
               'messages'] == [
               {"role": "system",
                "content": DEFAULT_SYSTEM_PROMPT},
               {"role": "user",
                "content": f"{DEFAULT_CONTEXT_PROMPT} \n\nContext:\n{bot_content}\n\n Q: {rephrased_query}\n A:"}
           ]

@patch("kairon.shared.llm.gpt3.openai.ChatCompletion.create", autospec=True)
@patch("kairon.shared.llm.gpt3.openai.Embedding.create", autospec=True)
@patch("kairon.shared.llm.gpt3.Utility.execute_http_request", autospec=True)
Expand Down
64 changes: 60 additions & 4 deletions tests/integration_test/services_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -745,7 +745,28 @@ def test_list_bots():
assert response['data']['shared'] == []


def test_content_upload_api():
def test_content_upload_api_with_gpt_feature_disabled():
    """Saving FAQ bot content must be rejected while enable_gpt_llm_faq is off
    (the BotSettings default for a fresh bot)."""
    response = client.post(
        url=f"/api/bot/{pytest.bot}/data/text/faq",
        json={
            "data": "Data refers to any collection of facts, statistics, or information that can be analyzed or "
                    "used to inform decision-making. Data can take many forms, including text, numbers, images, "
                    "audio, and video."
        },
        headers={"Authorization": pytest.token_type + " " + pytest.access_token}
    )
    actual = response.json()
    # Removed leftover debug print(actual); added the success assertion for
    # consistency with the other disabled-feature tests in this module.
    assert actual["message"] == "Faq feature is disabled for the bot! Please contact support."
    assert not actual["data"]
    assert not actual["success"]
    assert actual["error_code"] == 422


def test_content_upload_api(monkeypatch):
def _mock_get_bot_settings(*args, **kwargs):
return BotSettings(bot=pytest.bot, user="[email protected]", enable_gpt_llm_faq=True)

monkeypatch.setattr(MongoProcessor, 'get_bot_settings', _mock_get_bot_settings)
response = client.post(
url=f"/api/bot/{pytest.bot}/data/text/faq",
json={
Expand All @@ -762,7 +783,11 @@ def test_content_upload_api():
assert actual["error_code"] == 0


def test_content_upload_api_invalid():
def test_content_upload_api_invalid(monkeypatch):
def _mock_get_bot_settings(*args, **kwargs):
return BotSettings(bot=pytest.bot, user="[email protected]", enable_gpt_llm_faq=True)

monkeypatch.setattr(MongoProcessor, 'get_bot_settings', _mock_get_bot_settings)
response = client.post(
url=f"/api/bot/{pytest.bot}/data/text/faq",
json={
Expand Down Expand Up @@ -1018,7 +1043,26 @@ def test_add_kairon_faq_action_with_empty_context_prompt():
assert actual["error_code"] == 422


def test_add_kairon_faq_action():
def test_add_kairon_faq_action_with_gpt_feature_disabled():
    """Adding a kairon FAQ action must be rejected while enable_gpt_llm_faq is
    off (the BotSettings default for a fresh bot)."""
    # Otherwise-valid payload: only the disabled feature flag should block it.
    action = {"system_prompt": DEFAULT_SYSTEM_PROMPT, "context_prompt": DEFAULT_CONTEXT_PROMPT,
              "failure_message": DEFAULT_NLU_FALLBACK_RESPONSE, "top_results": 10, "similarity_threshold": 0.70}
    response = client.post(
        f"/api/bot/{pytest.bot}/action/kairon_faq",
        json=action,
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["message"] == 'Faq feature is disabled for the bot! Please contact support.'
    assert not actual["data"]
    assert not actual["success"]
    assert actual["error_code"] == 422


def test_add_kairon_faq_action(monkeypatch):
def _mock_get_bot_settings(*args, **kwargs):
return BotSettings(bot=pytest.bot, user="[email protected]", enable_gpt_llm_faq=True)

monkeypatch.setattr(MongoProcessor, 'get_bot_settings', _mock_get_bot_settings)
action = {"system_prompt": DEFAULT_SYSTEM_PROMPT, "context_prompt": DEFAULT_CONTEXT_PROMPT,
"failure_message": DEFAULT_NLU_FALLBACK_RESPONSE, "top_results": 10, "similarity_threshold": 0.70}
response = client.post(
Expand All @@ -1034,7 +1078,11 @@ def test_add_kairon_faq_action():
assert actual["error_code"] == 0


def test_add_kairon_faq_action_already_exist():
def test_add_kairon_faq_action_already_exist(monkeypatch):
def _mock_get_bot_settings(*args, **kwargs):
return BotSettings(bot=pytest.bot, user="[email protected]", enable_gpt_llm_faq=True)

monkeypatch.setattr(MongoProcessor, 'get_bot_settings', _mock_get_bot_settings)
action = {"system_prompt": DEFAULT_SYSTEM_PROMPT, "context_prompt": DEFAULT_CONTEXT_PROMPT,
"failure_message": DEFAULT_NLU_FALLBACK_RESPONSE, "top_results": 10, "similarity_threshold": 0.70}
response = client.post(
Expand Down Expand Up @@ -1609,6 +1657,14 @@ def test_upload_with_chat_client_config_only():
assert actual['data']["logs"][0]['start_timestamp']
assert actual['data']["logs"][0]['end_timestamp']

response = client.get(f"/api/bot/{pytest.bot}/chat/client/config",
headers={"Authorization": pytest.token_type + " " + pytest.access_token})
actual = response.json()
assert actual["success"]
assert actual["error_code"] == 0
actual['data'].pop('headers')
assert actual["data"] == Utility.read_yaml("tests/testing_data/all/chat_client_config.yml")["config"]


@responses.activate
def test_upload_with_chat_client_config():
Expand Down
Loading

0 comments on commit e9ebfa9

Please sign in to comment.