diff --git a/notebook.ipynb b/notebook.ipynb index 02b6ead..8a1efa6 100644 --- a/notebook.ipynb +++ b/notebook.ipynb @@ -86,9 +86,6 @@ "execution_count": 1, "id": "7e7503f4-8830-4972-a986-10f4c29624b9", "metadata": { - "jupyter": { - "source_hidden": true - }, "tags": [] }, "outputs": [], @@ -99,6 +96,9 @@ " \n", "sys.path.insert(0, os.path.abspath(\"./\"))\n", "\n", + "\n", + "#TODO - add language + CPU/GPU param that change the notebook's flow\n", + "\n", "from src.common import ProjectSecrets" ] }, @@ -124,9 +124,6 @@ "execution_count": 3, "id": "4c54ed9e-3f91-4ca4-a970-16797df94f6f", "metadata": { - "jupyter": { - "source_hidden": true - }, "tags": [] }, "outputs": [], @@ -178,7 +175,7 @@ " user_project=True,\n", " parameters={\n", " \"source\": \"git://github.com/mlrun/demo-call-center.git#main\",\n", - " \"default_image\": \"yonishelach/call-center-11.8:1\",\n", + " \"default_image\": \"yonishelach/call-center-transformers\",\n", " \"gpus\": 2,\n", " },\n", ")" @@ -197,7 +194,7 @@ "\n", "> Note: This entire workflow can be skipped if you want to use data that is already generated and available in this demo. See the [next cell](#skip_and_import_local_data) for more details.\n", "\n", - "The data generation workflow comprises six steps. If you want to skip the agents and clients data generation and just generate calls using the existing agents and clients, then pass `generate_clients_and_agents = True`. You can see each function's docstring and code by clicking the function name in the following list:\n", + "The data generation workflow comprises six steps. If you want to skip the agents and clients data generation and just generate calls using the existing agents and clients, then pass `generate_clients_and_agents = False`. You can see each function's docstring and code by clicking the function name in the following list:\n", "\n", "1. 
(Skippable) [**Agents & Clients Data Generator**](https://github.com/mlrun/functions/blob/master/structured_data_generator)- ***Hub Function*** — Use OpenAI's ChatGPT to generate the metadata for the call center's agents and clients. The data include fields like first name, last name, phone number, etc. All the agents and clients steps run in parallel.\n", "2. (Skippable) [**Insert Agents & Clients Data to DB**](.src/calls_analysis/data_management.py) — Insert the generated agents and clients data into the MySQL database.\n", @@ -558,6 +555,7 @@ " \"num_clients\": 10,\n", " \"num_agents\": 10,\n", " \"generation_model\": \"gpt-4\",\n", + " #TODO - change the language + language of 'available_voices' based on the language param that you define at the top of the notebook\n", " \"language\": \"Spanish\",\n", " \"available_voices\": [\n", " \"v2/es_speaker_0\", \n", @@ -948,6 +946,7 @@ " \"batch\": project.get_artifact_uri(\"batch-creation_calls_batch\"),\n", " \"calls_audio_files\": project.get_artifact_uri(\"text-to-audio_audio_files\"),\n", " \"batch_size\": 2,\n", + " # TODO - use the whisper-tiny in case it's CPU\n", " \"transcribe_model\": \"openai/whisper-large-v3\",\n", " \"translate_to_english\": True,\n", " \"pii_recognition_model\": \"whole\",\n", @@ -958,6 +957,7 @@ " \"PHONE\": (\"replace\", {\"new_value\": \"123456789\"}),\n", " },\n", " \"question_answering_model\": \"TheBloke/Mistral-7B-OpenOrca-GPTQ\",\n", + " # TODO - use this model in case it's CPU Qwen/Qwen2-1.5B-Instruct\n", " },\n", " watch=False,\n", " dirty=True,\n", @@ -1171,9 +1171,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "mlrun-base", "language": "python", - "name": "python3" + "name": "conda-env-mlrun-base-py" }, "language_info": { "codemirror_mode": { @@ -1185,7 +1185,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.9.18" } }, "nbformat": 4, diff --git
a/src/calls_generation/skip.py b/src/calls_generation/skip.py index 5954c50..4857ddb 100644 --- a/src/calls_generation/skip.py +++ b/src/calls_generation/skip.py @@ -100,13 +100,13 @@ def save_current_example_data(): export_dir.mkdir(parents=True, exist_ok=True) for artifact_name, target_path in [ - ("client-data-generator_clients", "clients.yaml"), - ("agent-data-generator_agents", "agents.yaml"), + ("client-data-generator_clients", "clients.zip"), + ("agent-data-generator_agents", "agents.zip"), ("conversation-generation_conversations", "conversation_generation/conversations.zip"), ("conversation-generation_metadata", "conversation_generation/metadata.zip"), ("conversation-generation_ground_truths", "conversation_generation/ground_truths.zip"), ("text-to-audio_audio_files", "text_to_audio/audio_files.zip"), - ("text-to-audio_audio_files_dataframe", "text_to_audio/dataframe.zip"), + ("text-to-audio_dataframe", "text_to_audio/dataframe.zip"), ("batch-creation_calls_batch", "batch_creation/calls_batch.zip"), ]: export_path = export_dir / target_path diff --git a/src/workflows/calls_analysis.py b/src/workflows/calls_analysis.py index 2b4c280..4c8f409 100644 --- a/src/workflows/calls_analysis.py +++ b/src/workflows/calls_analysis.py @@ -234,6 +234,7 @@ def pipeline( params={ "verbose": True, "model_name": question_answering_model, + # We don't need the auto_gptq_exllama if using CPU, we do need it if using GPU "auto_gptq_exllama_max_input_length": 8192, "device_map": "auto", "text_wrapper": TEXT_WRAPPER, diff --git a/src/workflows/calls_generation.py b/src/workflows/calls_generation.py index 7c30435..40b1bb5 100644 --- a/src/workflows/calls_generation.py +++ b/src/workflows/calls_generation.py @@ -52,8 +52,8 @@ def pipeline( "model_name": generation_model, "language": language, "fields": [ - "first_name: in spanish, no special characters", - "last_name: in spanish, no special characters", + f"first_name: in {language}, no special characters", + f"last_name: in 
{language}, no special characters", "phone_number", "email", "client_id", @@ -86,8 +86,8 @@ def pipeline( "model_name": generation_model, "language": language, "fields": [ - "first_name: in spanish, no special characters", - "last_name: in spanish, no special characters", + f"first_name: in {language}, no special characters", + f"last_name: in {language}, no special characters", "agent_id", ], },