Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

no longer necessary to call AIService super().start/stop/cancel(frame) #851

Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Changed

- It's no longer necessary to call `super().start/stop/cancel(frame)` if you
subclass and implement `AIService.start/stop/cancel()`. This is now handled
internally, which avoids subtle bugs caused by a forgotten `super()` call.

- It's no longer necessary to call `super().process_frame(frame, direction)` if
you subclass and implement `FrameProcessor.process_frame()`. This is now
handled internally, which avoids subtle bugs caused by a forgotten `super()` call.
Expand Down
18 changes: 12 additions & 6 deletions src/pipecat/services/ai_services.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,11 +111,11 @@ async def _update_settings(self, settings: Dict[str, Any]):

async def process_frame(self, frame: Frame, direction: FrameDirection):
if isinstance(frame, StartFrame):
await self.start(frame)
await self._start(frame)
elif isinstance(frame, CancelFrame):
await self.cancel(frame)
await self._cancel(frame)
elif isinstance(frame, EndFrame):
await self.stop(frame)
await self._stop(frame)

async def process_generator(self, generator: AsyncGenerator[Frame | None, None]):
async for f in generator:
Expand All @@ -125,6 +125,15 @@ async def process_generator(self, generator: AsyncGenerator[Frame | None, None])
else:
await self.push_frame(f)

async def _start(self, frame: StartFrame):
await self.start(frame)

async def _stop(self, frame: EndFrame):
await self.stop(frame)

async def _cancel(self, frame: CancelFrame):
await self.cancel(frame)


class LLMService(AIService):
"""This class is a no-op but serves as a base class for LLM services."""
Expand Down Expand Up @@ -248,19 +257,16 @@ async def run_tts(self, text: str) -> AsyncGenerator[Frame, None]:
pass

async def start(self, frame: StartFrame):
await super().start(frame)
if self._push_stop_frames:
self._stop_frame_task = self.get_event_loop().create_task(self._stop_frame_handler())

async def stop(self, frame: EndFrame):
await super().stop(frame)
if self._stop_frame_task:
self._stop_frame_task.cancel()
await self._stop_frame_task
self._stop_frame_task = None

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
if self._stop_frame_task:
self._stop_frame_task.cancel()
await self._stop_frame_task
Expand Down
3 changes: 0 additions & 3 deletions src/pipecat/services/assemblyai.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,15 +61,12 @@ async def set_language(self, language: Language):
self._settings["language"] = language

async def start(self, frame: StartFrame):
await super().start(frame)
await self._connect()

async def stop(self, frame: EndFrame):
await super().stop(frame)
await self._disconnect()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
await self._disconnect()

async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
Expand Down
3 changes: 0 additions & 3 deletions src/pipecat/services/azure.py
Original file line number Diff line number Diff line change
Expand Up @@ -676,16 +676,13 @@ async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
yield None

async def start(self, frame: StartFrame):
await super().start(frame)
self._speech_recognizer.start_continuous_recognition_async()

async def stop(self, frame: EndFrame):
await super().stop(frame)
self._speech_recognizer.stop_continuous_recognition_async()
self._audio_stream.close()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
self._speech_recognizer.stop_continuous_recognition_async()
self._audio_stream.close()

Expand Down
2 changes: 0 additions & 2 deletions src/pipecat/services/canonical.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,11 +84,9 @@ def __init__(
self._output_dir = output_dir

async def stop(self, frame: EndFrame):
await super().stop(frame)
await self._process_audio()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
await self._process_audio()

async def process_frame(self, frame: Frame, direction: FrameDirection):
Expand Down
3 changes: 0 additions & 3 deletions src/pipecat/services/deepgram.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,15 +176,12 @@ async def set_language(self, language: Language):
await self._connect()

async def start(self, frame: StartFrame):
await super().start(frame)
await self._connect()

async def stop(self, frame: EndFrame):
await super().stop(frame)
await self._disconnect()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
await self._disconnect()

async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
Expand Down
3 changes: 0 additions & 3 deletions src/pipecat/services/gemini_multimodal_live/gemini.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,15 +229,12 @@ async def set_context(self, context: OpenAILLMContext):
#

async def start(self, frame: StartFrame):
await super().start(frame)
await self._connect()

async def stop(self, frame: EndFrame):
await super().stop(frame)
await self._disconnect()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
await self._disconnect()

#
Expand Down
3 changes: 0 additions & 3 deletions src/pipecat/services/gladia.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,18 +177,15 @@ def language_to_service_language(self, language: Language) -> str | None:
return language_to_gladia_language(language)

async def start(self, frame: StartFrame):
await super().start(frame)
response = await self._setup_gladia()
self._websocket = await websockets.connect(response["url"])
self._receive_task = self.get_event_loop().create_task(self._receive_task_handler())

async def stop(self, frame: EndFrame):
await super().stop(frame)
await self._send_stop_recording()
await self._websocket.close()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
await self._websocket.close()

async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
Expand Down
3 changes: 0 additions & 3 deletions src/pipecat/services/openai_realtime_beta/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,15 +112,12 @@ def set_audio_input_paused(self, paused: bool):
#

async def start(self, frame: StartFrame):
await super().start(frame)
await self._connect()

async def stop(self, frame: EndFrame):
await super().stop(frame)
await self._disconnect()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
await self._disconnect()

#
Expand Down
3 changes: 0 additions & 3 deletions src/pipecat/services/riva.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,17 +187,14 @@ def can_generate_metrics(self) -> bool:
return False

async def start(self, frame: StartFrame):
await super().start(frame)
self._thread_task = self.get_event_loop().create_task(self._thread_task_handler())
self._response_task = self.get_event_loop().create_task(self._response_task_handler())
self._response_queue = asyncio.Queue()

async def stop(self, frame: EndFrame):
await super().stop(frame)
await self._stop_tasks()

async def cancel(self, frame: CancelFrame):
await super().cancel(frame)
await self._stop_tasks()

async def _stop_tasks(self):
Expand Down
Loading