Added user_question embedded field in PromptAction data_object with type and value and fixed test cases.
maheshsattala committed Oct 16, 2023
1 parent 88c2fe6 commit b1f0130
Showing 8 changed files with 57 additions and 42 deletions.
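
In short, the flat prompt_question string on prompt action configs becomes an embedded user_question object carrying a type and, for slot-based questions, the slot name in value. A minimal sketch of the before/after config shape (values are illustrative):

# Before this commit: a single enum-like string
old_config = {
    "name": "kairon_faq_action",
    "prompt_question": "from_slot",
}

# After this commit: an embedded object; "value" names the slot to read
# when type == "from_slot", and is unused for "from_user_message".
new_config = {
    "name": "kairon_faq_action",
    "user_question": {"type": "from_slot", "value": "prompt_question"},
}
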
12 changes: 6 additions & 6 deletions kairon/actions/definitions/prompt.py
@@ -8,7 +8,6 @@
from kairon.actions.definitions.base import ActionsBase
from kairon.shared.actions.data_objects import ActionServerLogs
from kairon.shared.actions.exception import ActionFailure
from rasa_sdk.forms import REQUESTED_SLOT
from kairon.shared.actions.models import ActionType, UserMessageType
from kairon.shared.actions.utils import ActionUtility
from kairon.shared.constants import FAQ_DISABLED_ERR, KaironSystemSlots, KAIRON_USER_MSG_ENTITY
@@ -61,8 +60,8 @@ async def execute(self, dispatcher: CollectingDispatcher, tracker: Tracker, doma

try:
k_faq_action_config, bot_settings = self.retrieve_config()
prompt_question = k_faq_action_config.get('prompt_question')
user_msg = self.__get_user_msg(tracker, prompt_question)
user_question = k_faq_action_config.get('user_question')
user_msg = self.__get_user_msg(tracker, user_question)
llm_params = await self.__get_llm_params(k_faq_action_config, dispatcher, tracker, domain)
llm = LLMFactory.get_instance("faq")(self.bot, bot_settings["llm_settings"])
llm_response = await llm.predict(user_msg, **llm_params)
@@ -183,9 +182,10 @@ def __add_user_context_to_http_response(http_response, tracker_data):
return response_context

@staticmethod
def __get_user_msg(tracker: Tracker, prompt_question: UserMessageType):
if prompt_question == UserMessageType.from_slot.value:
slot = tracker.get_slot(REQUESTED_SLOT)
def __get_user_msg(tracker: Tracker, user_question: Dict):
user_question_type = user_question.get('type')
if user_question_type == UserMessageType.from_slot.value:
slot = user_question.get('value')
user_msg = tracker.get_slot(slot)
else:
user_msg = tracker.latest_message.get('text')
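
Put together, the rewritten helper resolves the question text from the slot named in the action config instead of Rasa's REQUESTED_SLOT. A sketch of the logic this hunk introduces, assuming a Rasa Tracker-like object is passed in:

# Sketch only: user_question is the embedded document serialized to a dict,
# e.g. {"type": "from_slot", "value": "prompt_question"}.
def resolve_user_msg(tracker, user_question: dict) -> str:
    if user_question.get("type") == "from_slot":
        slot_name = user_question.get("value")      # slot name now comes from the action config
        return tracker.get_slot(slot_name)
    return tracker.latest_message.get("text")       # default: latest user message
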
7 changes: 6 additions & 1 deletion kairon/api/models.py
@@ -889,11 +889,16 @@ class LlmPromptRequest(BaseModel):
is_enabled: bool = True


class UserQuestionModel(BaseModel):
type: UserMessageType = UserMessageType.from_user_message.value
value: str = None


class PromptActionConfigRequest(BaseModel):
name: constr(to_lower=True, strip_whitespace=True)
num_bot_responses: int = 5
failure_message: str = DEFAULT_NLU_FALLBACK_RESPONSE
prompt_question: UserMessageType = UserMessageType.from_user_message.value
user_question: UserQuestionModel = UserQuestionModel()
top_results: int = 10
similarity_threshold: float = 0.70
enable_response_cache: bool = False
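
On the API side, the request model swaps the string field for a nested UserQuestionModel. A self-contained pydantic sketch (v1-style, matching the diff) of how requests now validate; the enum values are taken from the usages visible in this commit:

from enum import Enum
from pydantic import BaseModel

class UserMessageType(str, Enum):
    # The two values exercised by this commit's tests.
    from_user_message = "from_user_message"
    from_slot = "from_slot"

class UserQuestionModel(BaseModel):
    type: UserMessageType = UserMessageType.from_user_message.value
    value: str = None                 # pydantic v1 treats this as Optional[str]

# Omitting user_question falls back to from_user_message with no value.
default_question = UserQuestionModel()
slot_question = UserQuestionModel(type="from_slot", value="prompt_question")
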
9 changes: 7 additions & 2 deletions kairon/shared/actions/data_objects.py
@@ -655,6 +655,12 @@ def validate(self, clean=True):
raise ValidationError("System prompt must have static source!")


class UserQuestion(EmbeddedDocument):
type = StringField(default=UserMessageType.from_user_message.value,
choices=[p_type.value for p_type in UserMessageType])
value = StringField(default=None)


@auditlogger.log
@push_notification.apply
class PromptAction(Auditlog):
@@ -664,8 +670,7 @@ class PromptAction(Auditlog):
similarity_threshold = FloatField(default=0.70)
enable_response_cache = BooleanField(default=False)
failure_message = StringField(default=DEFAULT_NLU_FALLBACK_RESPONSE)
prompt_question = StringField(default=UserMessageType.from_user_message.value,
choices=[p_type.value for p_type in UserMessageType])
user_question = EmbeddedDocumentField(UserQuestion, default=UserQuestion())
bot = StringField(required=True)
user = StringField(required=True)
timestamp = DateTimeField(default=datetime.utcnow)
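
At the persistence layer, the same shape is stored as a mongoengine embedded document. A trimmed, self-contained sketch (only the fields touched by this commit; class and field names mirror the diff, the connection is illustrative):

from mongoengine import (Document, EmbeddedDocument, EmbeddedDocumentField,
                         StringField, connect)

class UserQuestion(EmbeddedDocument):
    type = StringField(default="from_user_message",
                       choices=["from_user_message", "from_slot"])
    value = StringField(default=None)

class PromptAction(Document):
    # Trimmed to the fields relevant here; the real document has many more.
    name = StringField(required=True)
    bot = StringField(required=True)
    user = StringField(required=True)
    user_question = EmbeddedDocumentField(UserQuestion, default=UserQuestion())

connect("kairon_sketch")   # assumed local MongoDB, for illustration only
PromptAction(name="kairon_faq_action", bot="test_bot", user="test_user",
             user_question=UserQuestion(type="from_slot", value="prompt_question")).save()
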
4 changes: 2 additions & 2 deletions kairon/shared/data/processor.py
@@ -43,7 +43,7 @@
SlotSetAction, FormValidationAction, EmailActionConfig, GoogleSearchAction, JiraAction, ZendeskAction, \
PipedriveLeadsAction, SetSlots, HubspotFormsAction, HttpActionResponse, SetSlotsFromResponse, \
CustomActionRequestParameters, KaironTwoStageFallbackAction, QuickReplies, RazorpayAction, PromptAction, \
LlmPrompt, FormSlotSet, DatabaseAction, DbOperation, DbQuery, PyscriptActionConfig, WebSearchAction
LlmPrompt, FormSlotSet, DatabaseAction, DbOperation, DbQuery, PyscriptActionConfig, WebSearchAction, UserQuestion
from kairon.shared.actions.models import ActionType, HttpRequestContentType, ActionParameterType, DbQueryValueType
from kairon.shared.data.audit.data_objects import AuditLogData
from kairon.shared.importer.processor import DataImporterLogProcessor
@@ -5433,7 +5433,7 @@ def edit_prompt_action(self, prompt_action_id: str, request_data: dict, bot: Tex
action = PromptAction.objects(id=prompt_action_id, bot=bot, status=True).get()
action.name = request_data.get("name")
action.failure_message = request_data.get("failure_message")
action.prompt_question = request_data.get("prompt_question")
action.user_question = UserQuestion(**request_data.get("user_question"))
action.top_results = request_data.get("top_results")
action.enable_response_cache = request_data.get("enable_response_cache", False)
action.similarity_threshold = request_data.get("similarity_threshold")
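
The processor change is the glue: the incoming JSON dict is unpacked straight into the embedded document. A tiny illustration of that pattern, reusing the UserQuestion sketch above (payload is an illustrative request body):

payload = {"user_question": {"type": "from_slot", "value": "prompt_question"}}
question = UserQuestion(**payload["user_question"])
assert question.type == "from_slot" and question.value == "prompt_question"
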
6 changes: 3 additions & 3 deletions tests/integration_test/action_service_test.py
@@ -16,7 +16,7 @@
EmailActionConfig, ActionServerLogs, GoogleSearchAction, JiraAction, ZendeskAction, PipedriveLeadsAction, SetSlots, \
HubspotFormsAction, HttpActionResponse, HttpActionRequestBody, SetSlotsFromResponse, CustomActionRequestParameters, \
KaironTwoStageFallbackAction, TwoStageFallbackTextualRecommendations, RazorpayAction, PromptAction, FormSlotSet, \
DatabaseAction, DbOperation, DbQuery, PyscriptActionConfig, WebSearchAction
DatabaseAction, DbOperation, DbQuery, PyscriptActionConfig, WebSearchAction, UserQuestion
from kairon.shared.actions.models import ActionType, ActionParameterType, DispatchType
from kairon.shared.actions.utils import ActionUtility
from kairon.shared.admin.constants import BotSecretType
@@ -9598,11 +9598,11 @@ def test_prompt_action_response_action_with_prompt_question_from_slot(mock_searc
Actions(name=action_name, type=ActionType.prompt_action.value, bot=bot, user=user).save()
BotSettings(llm_settings=LLMSettings(enable_faq=True), bot=bot, user=user).save()
PromptAction(name=action_name, bot=bot, user=user, num_bot_responses=2, llm_prompts=llm_prompts,
prompt_question="from_slot").save()
user_question=UserQuestion(type="from_slot", value="prompt_question")).save()
BotSecrets(secret_type=BotSecretType.gpt_key.value, value=value, bot=bot, user=user).save()

request_object = json.load(open("tests/testing_data/actions/action-request.json"))
request_object["tracker"]["slots"] = {"bot": bot, "user_question": user_msg, "requested_slot": "user_question"}
request_object["tracker"]["slots"] = {"bot": bot, "prompt_question": user_msg}
request_object["next_action"] = action_name
request_object["tracker"]["sender_id"] = user
request_object['tracker']['events'] = [{"event": "user", 'text': 'hello',
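
The updated integration test ties the two ends together: the saved action points user_question at the prompt_question slot, and the tracker payload now seeds that slot directly instead of going through requested_slot. An illustrative trace of the lookup the test exercises (values are made up; the real test loads its payload from action-request.json):

user_question = {"type": "from_slot", "value": "prompt_question"}
slots = {"bot": "<bot_id>", "prompt_question": "What is the capital of France?"}

slot_name = user_question["value"]   # "prompt_question"
user_msg = slots[slot_name]          # text the LLM is ultimately asked
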
6 changes: 3 additions & 3 deletions tests/integration_test/services_test.py
@@ -2036,7 +2036,7 @@ def _mock_get_bot_settings(*args, **kwargs):
return BotSettings(bot=pytest.bot, user="[email protected]", llm_settings=LLMSettings(enable_faq=True))

monkeypatch.setattr(MongoProcessor, 'get_bot_settings', _mock_get_bot_settings)
action = {'name': 'test_add_prompt_action', 'prompt_question': 'from_user_message',
action = {'name': 'test_add_prompt_action', 'user_question': {'type': 'from_user_message'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'Similarity Prompt',
@@ -2268,7 +2268,7 @@ def test_update_prompt_action_with_query_prompt_with_false():


def test_update_prompt_action():
action = {'name': 'test_update_prompt_action', 'prompt_question': 'from_slot',
action = {'name': 'test_update_prompt_action', 'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'Similarity_analytical Prompt',
@@ -2311,7 +2311,7 @@ def test_get_prompt_action():
assert actual["data"] == [
{'name': 'test_update_prompt_action', 'num_bot_responses': 5, 'top_results': 9, 'similarity_threshold': 0.5,
'enable_response_cache': False, 'failure_message': 'updated_failure_message',
'prompt_question': 'from_slot',
'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'hyperparameters': {'temperature': 0.0, 'max_tokens': 300, 'model': 'gpt-3.5-turbo', 'top_p': 0.0, 'n': 1,
'stream': False, 'stop': None, 'presence_penalty': 0.0, 'frequency_penalty': 0.0,
'logit_bias': {}},
4 changes: 2 additions & 2 deletions tests/unit_test/action/action_test.py
@@ -2692,7 +2692,7 @@ def test_get_prompt_action_config(self):
actual_config.pop("status")
actual_config.pop("user")
assert actual_config == {'name': 'kairon_faq_action', 'num_bot_responses': 5, 'top_results': 10,
'prompt_question': 'from_user_message',
'user_question': {'type': 'from_user_message'},
'similarity_threshold': 0.7,
'failure_message': "I'm sorry, I didn't quite understand that. Could you rephrase?",
'bot': 'test_action_server', 'enable_response_cache': False,
@@ -3977,7 +3977,7 @@ def test_get_prompt_action_config_2(self):
assert k_faq_action_config == {'name': 'kairon_faq_action', 'num_bot_responses': 5, 'top_results': 10,
'similarity_threshold': 0.7,
'enable_response_cache': False,
'prompt_question': 'from_user_message',
'user_question': {'type': 'from_user_message'},
'failure_message': "I'm sorry, I didn't quite understand that. Could you rephrase?",
'bot': 'test_bot_action_test', 'user': 'test_user_action_test',
'hyperparameters': {'temperature': 0.0, 'max_tokens': 300, 'model': 'gpt-3.5-turbo',
51 changes: 28 additions & 23 deletions tests/unit_test/data_processor/data_processor_test.py
@@ -495,7 +495,8 @@ def test_add_prompt_action_faq_action_with_default_values_and_instructions(self)
processor = MongoProcessor()
bot = 'test_bot'
user = 'test_user'
request = {'name': 'test_add_prompt_action_faq_action_with_default_values', 'prompt_question': 'from_slot',
request = {'name': 'test_add_prompt_action_faq_action_with_default_values',
'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'History Prompt', 'type': 'user', 'source': 'history', 'is_enabled': True}],
@@ -511,7 +512,7 @@ def test_add_prompt_action_faq_action_with_default_values_and_instructions(self)
{'name': 'test_add_prompt_action_faq_action_with_default_values',
'num_bot_responses': 5, 'top_results': 10, 'similarity_threshold': 0.7,
'failure_message': "I'm sorry, I didn't quite understand that. Could you rephrase?",
'enable_response_cache': False, 'prompt_question': 'from_slot',
'enable_response_cache': False, 'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'hyperparameters': {'temperature': 0.0, 'max_tokens': 300, 'model': 'gpt-3.5-turbo', 'top_p': 0.0, 'n': 1,
'stream': False, 'stop': None, 'presence_penalty': 0.0, 'frequency_penalty': 0.0,
'logit_bias': {}},
@@ -753,7 +754,8 @@ def test_edit_prompt_action_faq_action(self):
processor = MongoProcessor()
bot = 'test_bot'
user = 'test_user'
request = {'name': 'test_edit_prompt_action_faq_action', 'prompt_question': 'from_user_message',
request = {'name': 'test_edit_prompt_action_faq_action',
'user_question': {'type': 'from_user_message'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'Similarity Prompt',
@@ -783,7 +785,7 @@ def test_edit_prompt_action_faq_action(self):
'hyperparameters': {'temperature': 0.0, 'max_tokens': 300, 'model': 'gpt-3.5-turbo', 'top_p': 0.0, 'n': 1,
'stream': False, 'stop': None, 'presence_penalty': 0.0, 'frequency_penalty': 0.0,
'logit_bias': {}},
'prompt_question': 'from_user_message',
'user_question': {'type': 'from_user_message'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'Similarity Prompt',
@@ -798,7 +800,8 @@ def test_edit_prompt_action_faq_action(self):
"set_slots": [{"name": "gpt_result", "value": "${data}", "evaluation_type": "expression"},
{"name": "gpt_result_type", "value": "${data.type}", "evaluation_type": "script"}],
"dispatch_response": False}]
request = {'name': 'test_edit_prompt_action_faq_action_again', 'prompt_question': 'from_slot',
request = {'name': 'test_edit_prompt_action_faq_action_again',
'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static'}], 'instructions': ['Answer in a short manner.', 'Keep it simple.']}
processor.edit_prompt_action(pytest.action_id, request, bot, user)
@@ -807,7 +810,8 @@ def test_edit_prompt_action_faq_action(self):
assert action == [
{'name': 'test_edit_prompt_action_faq_action_again', 'num_bot_responses': 5, 'top_results': 10,
'similarity_threshold': 0.7, 'failure_message': "I'm sorry, I didn't quite understand that. Could you rephrase?",
'enable_response_cache': False, 'prompt_question': 'from_slot',
'enable_response_cache': False,
'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'hyperparameters': {'temperature': 0.0, 'max_tokens': 300, 'model': 'gpt-3.5-turbo', 'top_p': 0.0, 'n': 1,
'stream': False, 'stop': None, 'presence_penalty': 0.0, 'frequency_penalty': 0.0,
'logit_bias': {}},
@@ -821,21 +825,22 @@ def test_edit_prompt_action_with_less_hyperparameters(self):
processor = MongoProcessor()
bot = 'test_bot'
user = 'test_user'
request = {'name': 'test_edit_prompt_action_with_less_hyperparameters', 'prompt_question': 'from_slot',
'llm_prompts': [
{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system', 'source': 'static',
'is_enabled': True},
{'name': 'Similarity Prompt',
'instructions': 'Answer question based on the context above, if answer is not in the context go check previous logs.',
'type': 'user', 'source': 'bot_content', 'is_enabled': True},
{'name': 'Query Prompt',
'data': 'A programming language is a system of notation for writing computer programs.[1] Most programming languages are text-based formal languages, but they may also be graphical. They are a kind of computer language.',
'instructions': 'Answer according to the context', 'type': 'query', 'source': 'static',
'is_enabled': True},
{'name': 'Query Prompt',
'data': 'If there is no specific query, assume that user is aking about java programming.',
'instructions': 'Answer according to the context', 'type': 'query',
'source': 'static', 'is_enabled': True}],
request = {'name': 'test_edit_prompt_action_with_less_hyperparameters',
'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'llm_prompts': [
{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'Similarity Prompt',
'instructions': 'Answer question based on the context above, if answer is not in the context go check previous logs.',
'type': 'user', 'source': 'bot_content', 'is_enabled': True},
{'name': 'Query Prompt',
'data': 'A programming language is a system of notation for writing computer programs.[1] Most programming languages are text-based formal languages, but they may also be graphical. They are a kind of computer language.',
'instructions': 'Answer according to the context', 'type': 'query', 'source': 'static',
'is_enabled': True},
{'name': 'Query Prompt',
'data': 'If there is no specific query, assume that user is aking about java programming.',
'instructions': 'Answer according to the context', 'type': 'query',
'source': 'static', 'is_enabled': True}],
"failure_message": "updated_failure_message", "top_results": 10, "similarity_threshold": 0.70,
"use_query_prompt": True, "use_bot_responses": True, "query_prompt": "updated_query_prompt",
"num_bot_responses": 5, "hyperparameters": {"temperature": 0.0,
@@ -853,7 +858,7 @@ def test_edit_prompt_action_with_less_hyperparameters(self):
'hyperparameters': {'temperature': 0.0, 'max_tokens': 300, 'model': 'gpt-3.5-turbo', 'top_p': 0.0, 'n': 1,
'stream': False, 'stop': None, 'presence_penalty': 0.0, 'frequency_penalty': 0.0,
'logit_bias': {}},
'prompt_question': 'from_slot',
'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'Similarity Prompt', 'instructions': 'Answer question based on the context above, if answer is not in the context go check previous logs.',
@@ -881,7 +886,7 @@ def test_get_prompt_faq_action(self):
'hyperparameters': {'temperature': 0.0, 'max_tokens': 300, 'model': 'gpt-3.5-turbo', 'top_p': 0.0, 'n': 1,
'stream': False, 'stop': None, 'presence_penalty': 0.0, 'frequency_penalty': 0.0,
'logit_bias': {}},
'prompt_question': 'from_slot',
'user_question': {'type': 'from_slot', 'value': 'prompt_question'},
'llm_prompts': [{'name': 'System Prompt', 'data': 'You are a personal assistant.', 'type': 'system',
'source': 'static', 'is_enabled': True},
{'name': 'Similarity Prompt', 'instructions': 'Answer question based on the context above, if answer is not in the context go check previous logs.',
