#
# Copyright (c) 2024, Daily
#
# SPDX-License-Identifier: BSD 2-Clause License
#

"""Together AI interruptible bot example.

Joins a Daily room and runs a voice pipeline — user transcription ->
Together AI LLM -> Cartesia TTS — with user interruptions enabled.
"""

import asyncio
import os
import sys

import aiohttp
from dotenv import load_dotenv
from loguru import logger

from pipecat.frames.frames import LLMMessagesFrame
from pipecat.pipeline.pipeline import Pipeline
from pipecat.pipeline.runner import PipelineRunner
from pipecat.pipeline.task import PipelineParams, PipelineTask
from pipecat.processors.aggregators.llm_response import (
    LLMAssistantResponseAggregator, LLMUserResponseAggregator)
from pipecat.services.cartesia import CartesiaTTSService
from pipecat.services.together import TogetherLLMService
from pipecat.transports.services.daily import DailyParams, DailyTransport
from pipecat.vad.silero import SileroVADAnalyzer

from runner import configure

load_dotenv(override=True)

# Replace the default loguru handler with a DEBUG-level stderr sink.
logger.remove(0)
logger.add(sys.stderr, level="DEBUG")


async def main():
    """Connect to a Daily room and run the interruptible voice pipeline."""
    async with aiohttp.ClientSession() as session:
        room_url, token = await configure(session)

        transport = DailyTransport(
            room_url,
            token,
            "Respond bot",
            DailyParams(
                audio_out_enabled=True,
                transcription_enabled=True,
                vad_enabled=True,
                vad_analyzer=SileroVADAnalyzer(),
            ),
        )

        speech = CartesiaTTSService(
            api_key=os.getenv("CARTESIA_API_KEY"),
            voice_id="79a125e8-cd45-4c13-8a67-188112f4dd22",  # British Lady
        )

        # NOTE(review): model is read from TOGETHER_MODEL and will be None
        # if the variable is unset — confirm the env file defines it.
        language_model = TogetherLLMService(
            api_key=os.getenv("TOGETHER_API_KEY"),
            model=os.getenv("TOGETHER_MODEL"),
            params=TogetherLLMService.InputParams(
                temperature=1.0,
                frequency_penalty=2.0,
                presence_penalty=0.0,
                top_p=0.9,
                top_k=40,
            ),
        )

        messages = [
            {
                "role": "system",
                "content": "You are a helpful LLM in a WebRTC call. Your goal is to demonstrate your capabilities in a succinct way. Your output will be converted to audio so don't include special characters in your answers. Respond to what the user said in a creative and helpful way.",
            },
        ]

        # Both aggregators share `messages` so the conversation history
        # accumulates user and assistant turns in one place.
        user_responses = LLMUserResponseAggregator(messages)
        assistant_responses = LLMAssistantResponseAggregator(messages)

        pipeline = Pipeline(
            [
                transport.input(),    # audio/transcripts from the user
                user_responses,       # fold user turns into the history
                language_model,       # Together AI completion
                speech,               # Cartesia text-to-speech
                transport.output(),   # bot audio back to the room
                assistant_responses,  # record what the bot said
            ]
        )

        task = PipelineTask(pipeline, PipelineParams(allow_interruptions=True))

        @transport.event_handler("on_first_participant_joined")
        async def on_first_participant_joined(transport, participant):
            transport.capture_participant_transcription(participant["id"])
            # Seed the conversation so the bot speaks first.
            await task.queue_frames([LLMMessagesFrame(messages)])

        runner = PipelineRunner()

        await runner.run(task)


if __name__ == "__main__":
    asyncio.run(main())