Skip to content

Commit

Permalink
whitespace fix
Browse files Browse the repository at this point in the history
  • Loading branch information
kwindla committed Sep 29, 2024
1 parent 6e4c47c commit b8040c2
Show file tree
Hide file tree
Showing 2 changed files with 19 additions and 23 deletions.
27 changes: 12 additions & 15 deletions examples/foundational/07l-interruptible-together.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,29 +5,24 @@
#

import asyncio
import aiohttp
import os
import sys

import aiohttp
from dotenv import load_dotenv
from loguru import logger
from runner import configure

from pipecat.frames.frames import LLMMessagesFrame
from pipecat.pipeline.pipeline import Pipeline
from pipecat.pipeline.runner import PipelineRunner
from pipecat.pipeline.task import PipelineParams, PipelineTask
from pipecat.processors.aggregators.llm_response import (
LLMAssistantResponseAggregator,
LLMUserResponseAggregator,
)
from pipecat.services.ai_services import OpenAILLMContext
from pipecat.services.cartesia import CartesiaTTSService
from pipecat.services.together import TogetherLLMService
from pipecat.transports.services.daily import DailyParams, DailyTransport
from pipecat.vad.silero import SileroVADAnalyzer

from runner import configure

from loguru import logger

from dotenv import load_dotenv

load_dotenv(override=True)

logger.remove(0)
Expand Down Expand Up @@ -76,17 +71,19 @@ async def main():
},
]

tma_in = LLMUserResponseAggregator(messages)
tma_out = LLMAssistantResponseAggregator(messages)
context = OpenAILLMContext(messages, tools)
context_aggregator = llm.create_context_aggregator(context)
user_aggregator = context_aggregator.user()
assistant_aggregator = context_aggregator.assistant()

pipeline = Pipeline(
[
transport.input(), # Transport user input
tma_in, # User responses
user_aggregator, # User responses
llm, # LLM
tts, # TTS
transport.output(), # Transport bot output
tma_out, # Assistant spoken responses
assistant_aggregator, # Assistant spoken responses
]
)

Expand Down
15 changes: 7 additions & 8 deletions src/pipecat/processors/aggregators/llm_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,6 @@

from typing import List, Type

from pipecat.processors.aggregators.openai_llm_context import (
OpenAILLMContextFrame,
OpenAILLMContext,
)

from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
from pipecat.frames.frames import (
Frame,
InterimTranscriptionFrame,
Expand All @@ -22,11 +16,16 @@
LLMMessagesUpdateFrame,
LLMSetToolsFrame,
StartInterruptionFrame,
TranscriptionFrame,
TextFrame,
TranscriptionFrame,
UserStartedSpeakingFrame,
UserStoppedSpeakingFrame,
)
from pipecat.processors.aggregators.openai_llm_context import (
OpenAILLMContext,
OpenAILLMContextFrame,
)
from pipecat.processors.frame_processor import FrameDirection, FrameProcessor


class LLMResponseAggregator(FrameProcessor):
Expand Down Expand Up @@ -111,7 +110,7 @@ async def process_frame(self, frame: Frame, direction: FrameDirection):
await self.push_frame(frame, direction)
elif isinstance(frame, self._accumulator_frame):
if self._aggregating:
self._aggregation += f" {frame.text}" if self._aggregation else frame.text
self._aggregation += frame.text if self._aggregation else frame.text
# We have received a complete sentence, so if we have seen the
# end frame and we were still aggregating, it means we should
# send the aggregation.
Expand Down

0 comments on commit b8040c2

Please sign in to comment.