no longer necessary to call super().process_frame(frame, direction)
aconchillo committed Dec 12, 2024
1 parent 3f3a853 commit 06043ce
Showing 16 changed files with 0 additions and 53 deletions.
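The change is mechanical: in every file below, a process_frame override drops its leading call to the base class. A minimal before/after sketch of the pattern (the MyProcessor class and TextFrame handling are illustrative, not code from this commit; per the commit message, the premise is that the framework now runs the base FrameProcessor bookkeeping itself, so overrides no longer need to forward to it):

# Before this commit: every override had to forward to the base class first.
class MyProcessor(FrameProcessor):
    async def process_frame(self, frame: Frame, direction: FrameDirection):
        await super().process_frame(frame, direction)  # mandatory boilerplate
        if isinstance(frame, TextFrame):
            await self.push_frame(frame, direction)

# After this commit: the forwarding line is deleted; the base class's frame
# bookkeeping is presumably invoked by the framework before the override runs.
class MyProcessor(FrameProcessor):
    async def process_frame(self, frame: Frame, direction: FrameDirection):
        if isinstance(frame, TextFrame):
            await self.push_frame(frame, direction)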
10 changes: 0 additions & 10 deletions src/pipecat/services/ai_services.py
@@ -292,8 +292,6 @@ async def say(self, text: str):
         await self.queue_frame(TTSSpeakFrame(text))

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, TextFrame):
             await self._process_text_frame(frame)
         elif isinstance(frame, StartInterruptionFrame):
@@ -410,8 +408,6 @@ async def cancel(self, frame: CancelFrame):
         await self._stop_words_task()

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, (LLMFullResponseEndFrame, EndFrame)):
             await self.flush_audio()

@@ -498,8 +494,6 @@ async def process_audio_frame(self, frame: AudioRawFrame):

     async def process_frame(self, frame: Frame, direction: FrameDirection):
         """Processes a frame of audio data, either buffering or transcribing it."""
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, AudioRawFrame):
             # In this service we accumulate audio internally and at the end we
             # push a TextFrame. We also push audio downstream in case someone
@@ -597,8 +591,6 @@ async def run_image_gen(self, prompt: str) -> AsyncGenerator[Frame, None]:
         pass

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, TextFrame):
             await self.push_frame(frame, direction)
             await self.start_processing_metrics()
@@ -620,8 +612,6 @@ async def run_vision(self, frame: VisionImageRawFrame) -> AsyncGenerator[Frame,
         pass

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, VisionImageRawFrame):
             await self.start_processing_metrics()
             await self.process_generator(self.run_vision(frame))
4 changes: 0 additions & 4 deletions src/pipecat/services/anthropic.py
@@ -270,8 +270,6 @@ async def _process_context(self, context: OpenAILLMContext):
         )

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         context = None
         if isinstance(frame, OpenAILLMContextFrame):
             context: "AnthropicLLMContext" = AnthropicLLMContext.upgrade_to_anthropic(frame.context)
@@ -611,7 +609,6 @@ def __init__(self, context: OpenAILLMContext | AnthropicLLMContext):
         self._context = AnthropicLLMContext.from_openai_context(context)

     async def process_frame(self, frame, direction):
-        await super().process_frame(frame, direction)
         # Our parent method has already called push_frame(). So we can't interrupt the
         # flow here and we don't need to call push_frame() ourselves. Possibly something
         # to talk through (tagging @aleix). At some point we might need to refactor these
@@ -664,7 +661,6 @@ def __init__(self, user_context_aggregator: AnthropicUserContextAggregator, **kw
         self._pending_image_frame_message = None

     async def process_frame(self, frame, direction):
-        await super().process_frame(frame, direction)
         # See note above about not calling push_frame() here.
         if isinstance(frame, StartInterruptionFrame):
             self._function_call_in_progress = None
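The comments retained above still hold after the deletion: the parent aggregator implementation still runs and still calls push_frame(), it is just no longer invoked explicitly from the subclass. A sketch of what such an aggregator override reduces to (an assumed shape based on the hunk's context lines, not the full pipecat code):

class AnthropicAssistantContextAggregator(LLMAssistantContextAggregator):
    async def process_frame(self, frame, direction):
        # The parent has already pushed the frame downstream, so this
        # override only records state; it never calls push_frame() itself.
        if isinstance(frame, StartInterruptionFrame):
            self._function_call_in_progress = None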
1 change: 0 additions & 1 deletion src/pipecat/services/canonical.py
@@ -90,7 +90,6 @@ async def cancel(self, frame: CancelFrame):
         await self._process_audio()

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
         await self.push_frame(frame, direction)

     async def _process_audio(self):
2 changes: 0 additions & 2 deletions src/pipecat/services/cartesia.py
@@ -287,8 +287,6 @@ async def _receive_task_handler(self):
         await self._connect_websocket()

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         # If we received a TTSSpeakFrame and the LLM response included text (it
         # might be that it's only a function calling response) we pause
         # processing more frames until we receive a BotStoppedSpeakingFrame.
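The comment kept in this hunk (repeated in elevenlabs.py and playht.py below) describes a gate: once a TTSSpeakFrame arrives, the service holds further frames until a BotStoppedSpeakingFrame comes back. One way such a gate could be expressed, as a sketch only (the buffering structure is an assumption, not the service's actual mechanism):

import asyncio

from pipecat.frames.frames import BotStoppedSpeakingFrame, Frame, TTSSpeakFrame
from pipecat.processors.frame_processor import FrameDirection, FrameProcessor

class GatedService(FrameProcessor):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._paused = False
        self._buffer: list[tuple[Frame, FrameDirection]] = []

    async def process_frame(self, frame: Frame, direction: FrameDirection):
        if isinstance(frame, BotStoppedSpeakingFrame):
            self._paused = False
            await self.push_frame(frame, direction)
            # Flush everything held back while the bot was speaking.
            for held, held_direction in self._buffer:
                await self.push_frame(held, held_direction)
            self._buffer.clear()
        elif self._paused:
            self._buffer.append((frame, direction))
        else:
            if isinstance(frame, TTSSpeakFrame):
                self._paused = True  # hold later frames until speech finishes
            await self.push_frame(frame, direction)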
2 changes: 0 additions & 2 deletions src/pipecat/services/elevenlabs.py
@@ -272,8 +272,6 @@ async def push_frame(self, frame: Frame, direction: FrameDirection = FrameDirect
         await self.add_word_timestamps([("LLMFullResponseEndFrame", 0), ("Reset", 0)])

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         # If we received a TTSSpeakFrame and the LLM response included text (it
         # might be that it's only a function calling response) we pause
         # processing more frames until we receive a BotStoppedSpeakingFrame.
3 changes: 0 additions & 3 deletions src/pipecat/services/gemini_multimodal_live/gemini.py
@@ -107,7 +107,6 @@ def get_messages_for_initializing_history(self):

 class GeminiMultimodalLiveUserContextAggregator(OpenAIUserContextAggregator):
     async def process_frame(self, frame, direction):
-        await super().process_frame(frame, direction)
         # kind of a hack just to pass the LLMMessagesAppendFrame through, but it's fine for now
         if isinstance(frame, LLMMessagesAppendFrame):
             await self.push_frame(frame, direction)
@@ -305,8 +304,6 @@ async def _transcribe_audio(self, audio, context):
     #

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         # logger.debug(f"Processing frame: {frame}")

         if isinstance(frame, TranscriptionFrame):
2 changes: 0 additions & 2 deletions src/pipecat/services/google.py
@@ -652,8 +652,6 @@ async def _process_context(self, context: OpenAILLMContext):
         await self.push_frame(LLMFullResponseEndFrame())

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         context = None

         if isinstance(frame, OpenAILLMContextFrame):
4 changes: 0 additions & 4 deletions src/pipecat/services/openai.py
@@ -286,8 +286,6 @@ async def _process_context(self, context: OpenAILLMContext):
         )

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         context = None
         if isinstance(frame, OpenAILLMContextFrame):
             context: OpenAILLMContext = frame.context
@@ -475,7 +473,6 @@ def __init__(self, context: OpenAILLMContext):
         super().__init__(context=context)

     async def process_frame(self, frame, direction):
-        await super().process_frame(frame, direction)
         # Our parent method has already called push_frame(). So we can't interrupt the
         # flow here and we don't need to call push_frame() ourselves.
         try:
@@ -516,7 +513,6 @@ def __init__(self, user_context_aggregator: OpenAIUserContextAggregator, **kwarg
         self._pending_image_frame_message = None

     async def process_frame(self, frame, direction):
-        await super().process_frame(frame, direction)
         # See note above about not calling push_frame() here.
         if isinstance(frame, StartInterruptionFrame):
             self._function_calls_in_progress.clear()
1 change: 0 additions & 1 deletion src/pipecat/services/openai_realtime_beta/context.py
@@ -148,7 +148,6 @@ class OpenAIRealtimeUserContextAggregator(OpenAIUserContextAggregator):
     async def process_frame(
         self, frame: Frame, direction: FrameDirection = FrameDirection.DOWNSTREAM
     ):
-        await super().process_frame(frame, direction)
         # Parent does not push LLMMessagesUpdateFrame. This ensures that in a typical pipeline,
         # messages are only processed by the user context aggregator, which is generally what we want. But
         # we also need to send new messages over the websocket, so the openai realtime API has them
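The retained comment describes a split responsibility: the aggregator keeps LLMMessagesUpdateFrame local (the parent deliberately does not push it downstream), while the realtime service still needs those messages sent over the websocket. A sketch of the described flow (the _send_messages_over_websocket helper is hypothetical, named here only to illustrate the idea):

class OpenAIRealtimeUserContextAggregator(OpenAIUserContextAggregator):
    async def process_frame(self, frame, direction=FrameDirection.DOWNSTREAM):
        if isinstance(frame, LLMMessagesUpdateFrame):
            # Not pushed downstream: only this aggregator consumes the
            # update, but the realtime API must also learn about it.
            await self._send_messages_over_websocket(frame.messages)  # hypothetical helper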
2 changes: 0 additions & 2 deletions src/pipecat/services/openai_realtime_beta/openai.py
@@ -170,8 +170,6 @@ async def _truncate_current_audio_response(self):
     #

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, TranscriptionFrame):
             pass
         elif isinstance(frame, OpenAILLMContextFrame):
2 changes: 0 additions & 2 deletions src/pipecat/services/playht.py
@@ -265,8 +265,6 @@ async def _receive_task_handler(self):
         await self._connect_websocket()

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         # If we received a TTSSpeakFrame and the LLM response included text (it
         # might be that it's only a function calling response) we pause
         # processing more frames until we receive a BotStoppedSpeakingFrame.
1 change: 0 additions & 1 deletion src/pipecat/services/tavus.py
@@ -92,7 +92,6 @@ async def _encode_audio_and_send(
         await self._send_audio_message(audio_base64, done=done)

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
         if isinstance(frame, TTSStartedFrame):
             await self.start_processing_metrics()
             await self.start_ttfb_metrics()
2 changes: 0 additions & 2 deletions src/pipecat/transports/network/fastapi_websocket.py
@@ -101,8 +101,6 @@ def __init__(self, websocket: WebSocket, params: FastAPIWebsocketParams, **kwarg
         self._next_send_time = 0

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, StartInterruptionFrame):
             await self._write_frame(frame)
             self._next_send_time = 0
2 changes: 0 additions & 2 deletions src/pipecat/transports/network/websocket_server.py
@@ -139,8 +139,6 @@ async def set_client_connection(self, websocket: websockets.WebSocketServerProto
         self._websocket = websocket

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, StartInterruptionFrame):
             await self._write_frame(frame)
             self._next_send_time = 0
2 changes: 0 additions & 2 deletions src/pipecat/transports/services/daily.py
@@ -727,8 +727,6 @@ def vad_analyzer(self) -> VADAnalyzer | None:
     #

     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
         if isinstance(frame, UserImageRequestFrame):
             await self.request_participant_image(frame.user_id)

13 changes: 0 additions & 13 deletions src/pipecat/transports/services/livekit.py
@@ -16,7 +16,6 @@
     AudioRawFrame,
     CancelFrame,
     EndFrame,
-    Frame,
     InputAudioRawFrame,
     OutputAudioRawFrame,
     StartFrame,
@@ -334,12 +333,6 @@ async def stop(self, frame: EndFrame):
         await self._client.disconnect()
         logger.info("LiveKitInputTransport stopped")

-    async def process_frame(self, frame: Frame, direction: FrameDirection):
-        if isinstance(frame, EndFrame):
-            await self.stop(frame)
-        else:
-            await super().process_frame(frame, direction)
-
     async def cancel(self, frame: CancelFrame):
         await super().cancel(frame)
         await self._client.disconnect()
@@ -411,12 +404,6 @@ async def stop(self, frame: EndFrame):
         await self._client.disconnect()
         logger.info("LiveKitOutputTransport stopped")

-    async def process_frame(self, frame: Frame, direction: FrameDirection):
-        if isinstance(frame, EndFrame):
-            await self.stop(frame)
-        else:
-            await super().process_frame(frame, direction)
-
     async def cancel(self, frame: CancelFrame):
         await super().cancel(frame)
         await self._client.disconnect()
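livekit.py is the only file here that loses whole methods rather than single lines: both transports' process_frame overrides existed only to route EndFrame to stop(), forwarding everything else to the base class, which also leaves the Frame import unused. Presumably the base transport now performs that routing itself, along the lines of this sketch (an assumption about the base class, not code from this commit):

class BaseInputTransport(FrameProcessor):
    async def process_frame(self, frame: Frame, direction: FrameDirection):
        # Assumed: lifecycle frames are routed by the base class, so
        # per-transport overrides like LiveKit's become dead code.
        if isinstance(frame, EndFrame):
            await self.stop(frame)
        else:
            await self.push_frame(frame, direction)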
