diff --git a/examples/foundational/07l-interruptible-together.py b/examples/foundational/07l-interruptible-together.py
index e2cb55fed..a7086c941 100644
--- a/examples/foundational/07l-interruptible-together.py
+++ b/examples/foundational/07l-interruptible-together.py
@@ -5,29 +5,24 @@
 #
 
 import asyncio
-import aiohttp
 import os
 import sys
 
+import aiohttp
+from dotenv import load_dotenv
+from loguru import logger
+from runner import configure
+
 from pipecat.frames.frames import LLMMessagesFrame
 from pipecat.pipeline.pipeline import Pipeline
 from pipecat.pipeline.runner import PipelineRunner
 from pipecat.pipeline.task import PipelineParams, PipelineTask
-from pipecat.processors.aggregators.llm_response import (
-    LLMAssistantResponseAggregator,
-    LLMUserResponseAggregator,
-)
+from pipecat.services.ai_services import OpenAILLMContext
 from pipecat.services.cartesia import CartesiaTTSService
 from pipecat.services.together import TogetherLLMService
 from pipecat.transports.services.daily import DailyParams, DailyTransport
 from pipecat.vad.silero import SileroVADAnalyzer
 
-from runner import configure
-
-from loguru import logger
-
-from dotenv import load_dotenv
-
 load_dotenv(override=True)
 
 logger.remove(0)
@@ -76,17 +71,19 @@ async def main():
         },
     ]
 
-    tma_in = LLMUserResponseAggregator(messages)
-    tma_out = LLMAssistantResponseAggregator(messages)
+    context = OpenAILLMContext(messages, tools)
+    context_aggregator = llm.create_context_aggregator(context)
+    user_aggregator = context_aggregator.user()
+    assistant_aggregator = context_aggregator.assistant()
 
     pipeline = Pipeline(
         [
             transport.input(),  # Transport user input
-            tma_in,  # User responses
+            user_aggregator,  # User responses
             llm,  # LLM
             tts,  # TTS
             transport.output(),  # Transport bot output
-            tma_out,  # Assistant spoken responses
+            assistant_aggregator,  # Assistant spoken responses
         ]
     )
 
diff --git a/src/pipecat/processors/aggregators/llm_response.py b/src/pipecat/processors/aggregators/llm_response.py
index 036f5fe47..a3cd63cbd 100644
--- a/src/pipecat/processors/aggregators/llm_response.py
+++ b/src/pipecat/processors/aggregators/llm_response.py
@@ -6,12 +6,6 @@
 
 from typing import List, Type
 
-from pipecat.processors.aggregators.openai_llm_context import (
-    OpenAILLMContextFrame,
-    OpenAILLMContext,
-)
-
-from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
 from pipecat.frames.frames import (
     Frame,
     InterimTranscriptionFrame,
@@ -22,11 +16,16 @@
     LLMMessagesUpdateFrame,
     LLMSetToolsFrame,
     StartInterruptionFrame,
-    TranscriptionFrame,
     TextFrame,
+    TranscriptionFrame,
     UserStartedSpeakingFrame,
     UserStoppedSpeakingFrame,
 )
+from pipecat.processors.aggregators.openai_llm_context import (
+    OpenAILLMContext,
+    OpenAILLMContextFrame,
+)
+from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
 
 
 class LLMResponseAggregator(FrameProcessor):
@@ -111,7 +110,7 @@ async def process_frame(self, frame: Frame, direction: FrameDirection):
             await self.push_frame(frame, direction)
         elif isinstance(frame, self._accumulator_frame):
             if self._aggregating:
-                self._aggregation += f" {frame.text}" if self._aggregation else frame.text
+                self._aggregation += frame.text if self._aggregation else frame.text
                 # We have recevied a complete sentence, so if we have seen the
                 # end frame and we were still aggregating, it means we should
                 # send the aggregation.
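
For reference, a minimal sketch (not part of the patch) of the context-aggregator wiring the example adopts. Only the calls that appear in the diff (OpenAILLMContext, llm.create_context_aggregator, .user(), .assistant(), Pipeline) are taken from the source; the TogetherLLMService construction, the tools list, and the transport/TTS steps are assumed or elided here and follow the rest of the example file.

# Sketch only: service construction and API keys are assumed, not shown in the diff.
import os

from pipecat.pipeline.pipeline import Pipeline
from pipecat.services.ai_services import OpenAILLMContext
from pipecat.services.together import TogetherLLMService

# Assumed constructor arguments; the example builds llm, tts, and transport elsewhere.
llm = TogetherLLMService(api_key=os.getenv("TOGETHER_API_KEY"))

messages = [{"role": "system", "content": "You are a helpful voice assistant."}]
tools = []  # the example passes a tools list built earlier in the file

# One shared context replaces the old LLMUserResponseAggregator /
# LLMAssistantResponseAggregator pair; the LLM service creates both ends.
context = OpenAILLMContext(messages, tools)
context_aggregator = llm.create_context_aggregator(context)

pipeline = Pipeline(
    [
        # transport.input() would normally come first
        context_aggregator.user(),  # adds user transcriptions to the context before the LLM
        llm,  # generates the assistant response
        # tts and transport.output() sit here in the full example
        context_aggregator.assistant(),  # records the spoken response back into the context
    ]
)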