
Commit

Merge pull request #93 from daily-co/frame-name-cleanup
Cleanup the last few badly-named Frame types
Moishe authored Mar 28, 2024
2 parents 22bbede + 2732210 commit e7f9296
Showing 26 changed files with 64 additions and 64 deletions.
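For reference, the two frame types renamed in this commit are LLMMessagesQueueFrame and TranscriptionQueueFrame; it also fixes stale comment and docstring references to EndFrame and EndPipeFrame. A minimal before/after sketch of the import change (paths taken from the example files in this diff):

# Before this commit (old names):
# from dailyai.pipeline.frames import LLMMessagesQueueFrame, TranscriptionQueueFrame

# After this commit (new names):
from dailyai.pipeline.frames import LLMMessagesFrame, TranscriptionFrame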
4 changes: 2 additions & 2 deletions examples/foundational/02-llm-say-one-thing.py
@@ -4,7 +4,7 @@

import aiohttp

- from dailyai.pipeline.frames import EndFrame, LLMMessagesQueueFrame
+ from dailyai.pipeline.frames import EndFrame, LLMMessagesFrame
from dailyai.pipeline.pipeline import Pipeline
from dailyai.transports.daily_transport import DailyTransport
from dailyai.services.elevenlabs_ai_service import ElevenLabsTTSService
@@ -49,7 +49,7 @@ async def main(room_url):

@transport.event_handler("on_first_other_participant_joined")
async def on_first_other_participant_joined(transport):
- await pipeline.queue_frames([LLMMessagesQueueFrame(messages), EndFrame()])
+ await pipeline.queue_frames([LLMMessagesFrame(messages), EndFrame()])

await transport.run(pipeline)
4 changes: 2 additions & 2 deletions examples/foundational/04-utterance-and-speech.py
@@ -9,7 +9,7 @@
from dailyai.transports.daily_transport import DailyTransport
from dailyai.services.azure_ai_services import AzureLLMService, AzureTTSService
from dailyai.services.deepgram_ai_services import DeepgramTTSService
- from dailyai.pipeline.frames import EndPipeFrame, LLMMessagesQueueFrame, TextFrame
+ from dailyai.pipeline.frames import EndPipeFrame, LLMMessagesFrame, TextFrame
from dailyai.services.elevenlabs_ai_service import ElevenLabsTTSService

from runner import configure
@@ -60,7 +60,7 @@ async def main(room_url: str):
# will run in parallel with generating and speaking the audio for static text, so there's no delay to
# speak the LLM response.
llm_pipeline = Pipeline([llm, elevenlabs_tts])
- await llm_pipeline.queue_frames([LLMMessagesQueueFrame(messages), EndPipeFrame()])
+ await llm_pipeline.queue_frames([LLMMessagesFrame(messages), EndPipeFrame()])

simple_tts_pipeline = Pipeline([azure_tts])
await simple_tts_pipeline.queue_frames(
4 changes: 2 additions & 2 deletions examples/foundational/05-sync-speech-and-image.py
@@ -17,7 +17,7 @@
TextFrame,
EndFrame,
ImageFrame,
- LLMMessagesQueueFrame,
+ LLMMessagesFrame,
LLMResponseStartFrame,
)
from dailyai.pipeline.frame_processor import FrameProcessor
@@ -133,7 +133,7 @@ async def main(room_url):
}
]
frames.append(MonthFrame(month))
- frames.append(LLMMessagesQueueFrame(messages))
+ frames.append(LLMMessagesFrame(messages))

frames.append(EndFrame())
await pipeline.queue_frames(frames)
4 changes: 2 additions & 2 deletions examples/foundational/06-listen-and-respond.py
@@ -2,7 +2,7 @@
import aiohttp
import logging
import os
- from dailyai.pipeline.frames import LLMMessagesQueueFrame
+ from dailyai.pipeline.frames import LLMMessagesFrame
from dailyai.pipeline.pipeline import Pipeline

from dailyai.transports.daily_transport import DailyTransport
@@ -76,7 +76,7 @@ async def on_first_other_participant_joined(transport):
# Kick off the conversation.
messages.append(
{"role": "system", "content": "Please introduce yourself to the user."})
- await pipeline.queue_frames([LLMMessagesQueueFrame(messages)])
+ await pipeline.queue_frames([LLMMessagesFrame(messages)])

transport.transcription_settings["extra"]["endpointing"] = True
transport.transcription_settings["extra"]["punctuate"] = True
4 changes: 2 additions & 2 deletions examples/foundational/08-bots-arguing.py
@@ -10,7 +10,7 @@
from dailyai.services.azure_ai_services import AzureLLMService, AzureTTSService
from dailyai.services.elevenlabs_ai_service import ElevenLabsTTSService
from dailyai.services.fal_ai_services import FalImageGenService
- from dailyai.pipeline.frames import AudioFrame, EndFrame, ImageFrame, LLMMessagesQueueFrame, TextFrame
+ from dailyai.pipeline.frames import AudioFrame, EndFrame, ImageFrame, LLMMessagesFrame, TextFrame

from runner import configure

@@ -80,7 +80,7 @@ async def get_text_and_audio(messages) -> Tuple[str, bytearray]:
[llm, sentence_aggregator, tts1], source_queue, sink_queue
)

- await source_queue.put(LLMMessagesQueueFrame(messages))
+ await source_queue.put(LLMMessagesFrame(messages))
await source_queue.put(EndFrame())
await pipeline.run_pipeline()
4 changes: 2 additions & 2 deletions examples/foundational/10-wake-word.py
@@ -18,7 +18,7 @@
TextFrame,
ImageFrame,
SpriteFrame,
- TranscriptionQueueFrame,
+ TranscriptionFrame,
)
from dailyai.services.ai_services import AIService

@@ -76,7 +76,7 @@ def __init__(self, bot_participant_id=None):
self.bot_participant_id = bot_participant_id

async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
- if isinstance(frame, TranscriptionQueueFrame):
+ if isinstance(frame, TranscriptionFrame):
if frame.participantId != self.bot_participant_id:
yield frame
4 changes: 2 additions & 2 deletions examples/foundational/11-sound-effects.py
@@ -16,7 +16,7 @@
Frame,
AudioFrame,
LLMResponseEndFrame,
- LLMMessagesQueueFrame,
+ LLMMessagesFrame,
)
from typing import AsyncGenerator

@@ -62,7 +62,7 @@ def __init__(self):
pass

async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
- if isinstance(frame, LLMMessagesQueueFrame):
+ if isinstance(frame, LLMMessagesFrame):
yield AudioFrame(sounds["ding2.wav"])
# In case anything else up the stack needs it
yield frame
4 changes: 2 additions & 2 deletions examples/foundational/13a-whisper-local.py
@@ -1,7 +1,7 @@
import argparse
import asyncio
import logging
- from dailyai.pipeline.frames import EndFrame, TranscriptionQueueFrame
+ from dailyai.pipeline.frames import EndFrame, TranscriptionFrame

from dailyai.transports.local_transport import LocalTransport
from dailyai.services.whisper_ai_services import WhisperSTTService
@@ -32,7 +32,7 @@ async def handle_transcription():
while not transport_done.is_set():
item = await transcription_output_queue.get()
print("got item from queue", item)
- if isinstance(item, TranscriptionQueueFrame):
+ if isinstance(item, TranscriptionFrame):
print(item.text)
elif isinstance(item, EndFrame):
break
4 changes: 2 additions & 2 deletions examples/foundational/websocket-server/sample.py
@@ -3,7 +3,7 @@
import logging
import os
from dailyai.pipeline.frame_processor import FrameProcessor
- from dailyai.pipeline.frames import TextFrame, TranscriptionQueueFrame
+ from dailyai.pipeline.frames import TextFrame, TranscriptionFrame
from dailyai.pipeline.pipeline import Pipeline
from dailyai.services.elevenlabs_ai_service import ElevenLabsTTSService
from dailyai.transports.websocket_transport import WebsocketTransport
@@ -16,7 +16,7 @@

class WhisperTranscriber(FrameProcessor):
async def process_frame(self, frame):
- if isinstance(frame, TranscriptionQueueFrame):
+ if isinstance(frame, TranscriptionFrame):
print(f"Transcribed: {frame.text}")
else:
yield frame
4 changes: 2 additions & 2 deletions examples/internal/11a-dial-out.py
@@ -7,7 +7,7 @@
from dailyai.services.azure_ai_services import AzureLLMService, AzureTTSService
from dailyai.pipeline.aggregators import LLMContextAggregator
from dailyai.services.ai_services import AIService, FrameLogger
- from dailyai.pipeline.frames import Frame, AudioFrame, LLMResponseEndFrame, LLMMessagesQueueFrame
+ from dailyai.pipeline.frames import Frame, AudioFrame, LLMResponseEndFrame, LLMMessagesFrame
from typing import AsyncGenerator

from runner import configure
@@ -51,7 +51,7 @@ def __init__(self):
pass

async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
- if isinstance(frame, LLMMessagesQueueFrame):
+ if isinstance(frame, LLMMessagesFrame):
yield AudioFrame(sounds["ding2.wav"])
# In case anything else up the stack needs it
yield frame
4 changes: 2 additions & 2 deletions examples/starter-apps/chatbot.py
@@ -14,7 +14,7 @@
SpriteFrame,
Frame,
LLMResponseEndFrame,
- LLMMessagesQueueFrame,
+ LLMMessagesFrame,
AudioFrame,
PipelineStartedFrame,
)
@@ -129,7 +129,7 @@ async def main(room_url: str, token):
@transport.event_handler("on_first_other_participant_joined")
async def on_first_other_participant_joined(transport):
print(f"!!! in here, pipeline.source is {pipeline.source}")
- await pipeline.queue_frames([LLMMessagesQueueFrame(messages)])
+ await pipeline.queue_frames([LLMMessagesFrame(messages)])

async def run_conversation():
6 changes: 3 additions & 3 deletions examples/starter-apps/storybot.py
@@ -24,7 +24,7 @@
)
from dailyai.pipeline.frames import (
EndPipeFrame,
- LLMMessagesQueueFrame,
+ LLMMessagesFrame,
Frame,
TextFrame,
LLMResponseEndFrame,
@@ -172,7 +172,7 @@ async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
prompt = f"You are an illustrator for a children's story book. Here is the story so far:\n\n\"{' '.join(self._story[:-1])}\"\n\nGenerate a prompt for DALL-E to create an illustration for the next page. Here's the sentence for the next page:\n\n\"{self._story[-1:][0]}\"\n\n Your response should start with the phrase \"Children's book illustration of\"."
msgs = [{"role": "system", "content": prompt}]
image_prompt = ""
- async for f in self._llm.process_frame(LLMMessagesQueueFrame(msgs)):
+ async for f in self._llm.process_frame(LLMMessagesFrame(msgs)):
if isinstance(f, TextFrame):
image_prompt += f.text
async for f in self._img.process_frame(TextFrame(image_prompt)):
@@ -253,7 +253,7 @@ async def storytime():
await local_pipeline.queue_frames(
[
ImageFrame(None, images["grandma-listening.png"]),
- LLMMessagesQueueFrame(intro_messages),
+ LLMMessagesFrame(intro_messages),
AudioFrame(sounds["listening.wav"]),
EndPipeFrame(),
]
4 changes: 2 additions & 2 deletions examples/starter-apps/translator.py
@@ -7,7 +7,7 @@
from dailyai.pipeline.aggregators import (
SentenceAggregator,
)
- from dailyai.pipeline.frames import Frame, LLMMessagesQueueFrame, TextFrame
+ from dailyai.pipeline.frames import Frame, LLMMessagesFrame, TextFrame
from dailyai.pipeline.frame_processor import FrameProcessor
from dailyai.pipeline.pipeline import Pipeline
from dailyai.transports.daily_transport import DailyTransport
@@ -44,7 +44,7 @@ async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
},
{"role": "user", "content": frame.text},
]
- yield LLMMessagesQueueFrame(context)
+ yield LLMMessagesFrame(context)
else:
yield frame
20 changes: 10 additions & 10 deletions src/dailyai/pipeline/aggregators.py
@@ -7,11 +7,11 @@
EndFrame,
EndPipeFrame,
Frame,
- LLMMessagesQueueFrame,
+ LLMMessagesFrame,
LLMResponseEndFrame,
LLMResponseStartFrame,
TextFrame,
- TranscriptionQueueFrame,
+ TranscriptionFrame,
UserStartedSpeakingFrame,
UserStoppedSpeakingFrame,
)
@@ -57,7 +57,7 @@ async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
{"role": self._role, "content": self.aggregation})
self.aggregation = ""
yield self._end_frame()
- yield LLMMessagesQueueFrame(self.messages)
+ yield LLMMessagesFrame(self.messages)
elif isinstance(frame, self._accumulator_frame) and self.aggregating:
self.aggregation += f" {frame.text}"
if self._pass_through:
@@ -84,7 +84,7 @@ def __init__(self, messages: list[dict]):
role="user",
start_frame=UserStartedSpeakingFrame,
end_frame=UserStoppedSpeakingFrame,
- accumulator_frame=TranscriptionQueueFrame,
+ accumulator_frame=TranscriptionFrame,
pass_through=False,
)

@@ -114,7 +114,7 @@ async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
return

# Ignore transcription frames from the bot
- if isinstance(frame, TranscriptionQueueFrame):
+ if isinstance(frame, TranscriptionFrame):
if frame.participantId == self.bot_participant_id:
return

@@ -126,19 +126,19 @@ async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:

# TODO: split up transcription by participant
if self.complete_sentences:
- # type: ignore -- the linter thinks this isn't a TextQueueFrame, even
+ # type: ignore -- the linter thinks this isn't a TextFrame, even
# though we check it above
self.sentence += frame.text
if self.sentence.endswith((".", "?", "!")):
self.messages.append(
{"role": self.role, "content": self.sentence})
self.sentence = ""
- yield LLMMessagesQueueFrame(self.messages)
+ yield LLMMessagesFrame(self.messages)
else:
- # type: ignore -- the linter thinks this isn't a TextQueueFrame, even
+ # type: ignore -- the linter thinks this isn't a TextFrame, even
# though we check it above
self.messages.append({"role": self.role, "content": frame.text})
- yield LLMMessagesQueueFrame(self.messages)
+ yield LLMMessagesFrame(self.messages)


class LLMUserContextAggregator(LLMContextAggregator):
@@ -334,7 +334,7 @@ async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
continue
seen_ids.add(id(frame))

- # Skip passing along EndParallelPipeQueueFrame, because we use them
+ # Skip passing along EndPipeFrame, because we use them
# for our own flow control.
if not isinstance(frame, EndPipeFrame):
yield frame
4 changes: 2 additions & 2 deletions src/dailyai/pipeline/frame_processor.py
@@ -12,9 +12,9 @@ class FrameProcessor:
By convention, FrameProcessors should immediately yield any frames they don't process.
- Stateful FrameProcessors should watch for the EndStreamQueueFrame and finalize their
+ Stateful FrameProcessors should watch for the EndFrame and finalize their
output, eg. yielding an unfinished sentence if they're aggregating LLM output to full
- sentences. EndStreamQueueFrame is also a chance to clean up any services that need to
+ sentences. EndFrame is also a chance to clean up any services that need to
be closed, del'd, etc.
"""
Expand Down
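To illustrate the convention this docstring describes, a minimal processor using the renamed TranscriptionFrame might look like the sketch below (hypothetical class, modeled on the WhisperTranscriber and wake-word examples earlier in this diff):

from typing import AsyncGenerator

from dailyai.pipeline.frame_processor import FrameProcessor
from dailyai.pipeline.frames import Frame, TranscriptionFrame


class TranscriptionPrinter(FrameProcessor):
    """Hypothetical processor: prints transcriptions, passes everything else through."""

    async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:
        if isinstance(frame, TranscriptionFrame):
            print(f"Transcribed: {frame.text}")
        else:
            # By convention, immediately yield frames we don't process; a stateful
            # processor would also watch for EndFrame here and flush pending output.
            yield frame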
6 changes: 3 additions & 3 deletions src/dailyai/pipeline/frames.py
@@ -102,7 +102,7 @@ def __str__(self):


@dataclass()
- class TranscriptionQueueFrame(TextFrame):
+ class TranscriptionFrame(TextFrame):
"""A text frame with transcription-specific data. Will be placed in the
transport's receive queue when a participant speaks."""
participantId: str
@@ -126,7 +126,7 @@ class TTSEndFrame(ControlFrame):


@dataclass()
- class LLMMessagesQueueFrame(Frame):
+ class LLMMessagesFrame(Frame):
"""A frame containing a list of LLM messages. Used to signal that an LLM
service should run a chat completion and emit an LLMStartFrames, TextFrames
and an LLMEndFrame.
@@ -137,7 +137,7 @@

@dataclass()
class OpenAILLMContextFrame(Frame):
- """Like an LLMMessagesQueueFrame, but with extra context specific to the
+ """Like an LLMMessagesFrame, but with extra context specific to the
OpenAI API. The context in this message is also mutable, and will be
changed by the OpenAIContextAggregator frame processor."""
context: OpenAILLMContext
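As a usage sketch for the renamed LLMMessagesFrame (messages follow the chat-completion role/content shape used throughout these examples; the pipeline argument is assumed to be an already-constructed Pipeline):

from dailyai.pipeline.frames import EndFrame, LLMMessagesFrame


async def kick_off_conversation(pipeline):
    messages = [{"role": "system", "content": "Please introduce yourself to the user."}]
    # An LLMMessagesFrame asks the downstream LLM service to run a chat completion;
    # the EndFrame signals that no more frames will follow.
    await pipeline.queue_frames([LLMMessagesFrame(messages), EndFrame()])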
4 changes: 2 additions & 2 deletions src/dailyai/pipeline/opeanai_llm_aggregator.py
@@ -6,7 +6,7 @@
LLMResponseStartFrame,
OpenAILLMContextFrame,
TextFrame,
- TranscriptionQueueFrame,
+ TranscriptionFrame,
UserStartedSpeakingFrame,
UserStoppedSpeakingFrame,
)
@@ -90,7 +90,7 @@ def __init__(self, context: OpenAILLMContext):
role="user",
start_frame=UserStartedSpeakingFrame,
end_frame=UserStoppedSpeakingFrame,
- accumulator_frame=TranscriptionQueueFrame,
+ accumulator_frame=TranscriptionFrame,
pass_through=False,
)
4 changes: 2 additions & 2 deletions src/dailyai/pipeline/pipeline.py
@@ -81,8 +81,8 @@ async def run_pipeline(self):
The source and sink queues must be set before calling this method.
- This method will exit when an EndStreamQueueFrame is placed on the sink queue.
- No more frames will be placed on the sink queue after an EndStreamQueueFrame, even
+ This method will exit when an EndFrame is placed on the sink queue.
+ No more frames will be placed on the sink queue after an EndFrame, even
if it's not the last frame yielded by the last frame_processor in the pipeline..
"""
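The source/sink-queue flow described in this docstring is the pattern 08-bots-arguing.py (above) uses; a condensed, hypothetical sketch of it with the renamed frame:

import asyncio

from dailyai.pipeline.frames import EndFrame, LLMMessagesFrame
from dailyai.pipeline.pipeline import Pipeline


async def run_once(processors, messages):
    # processors is assumed to be a list of services, e.g. [llm, sentence_aggregator, tts].
    source_queue = asyncio.Queue()
    sink_queue = asyncio.Queue()
    pipeline = Pipeline(processors, source_queue, sink_queue)

    await source_queue.put(LLMMessagesFrame(messages))
    await source_queue.put(EndFrame())
    # run_pipeline() returns once the EndFrame reaches the sink queue.
    await pipeline.run_pipeline()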
