Skip to content

Commit

Permalink
Update apps
Browse files Browse the repository at this point in the history
  • Loading branch information
Moishe committed Mar 15, 2024
1 parent b8b35db commit 18bf26d
Show file tree
Hide file tree
Showing 5 changed files with 38 additions and 30 deletions.
12 changes: 8 additions & 4 deletions src/dailyai/services/azure_ai_services.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,13 +64,17 @@ async def run_tts(self, sentence) -> AsyncGenerator[bytes, None]:

class AzureLLMService(BaseOpenAILLMService):
    """LLM service backed by an Azure OpenAI deployment.

    The endpoint and API version are stored *before* delegating to the
    OpenAI base class, because the base class ``__init__`` calls
    ``create_client()``, and the override below reads those attributes.
    """

    def __init__(self, *, api_key, endpoint, api_version="2023-12-01-preview", model):
        # Must be set before super().__init__(), which invokes create_client().
        self._endpoint = endpoint
        self._api_version = api_version

        super().__init__(api_key=api_key, model=model)
        self._model: str = model

    # This overrides the client created by the super class init.
    def create_client(self, api_key=None, base_url=None):
        # base_url is accepted for signature compatibility with the base
        # class but unused: Azure routing is driven by azure_endpoint.
        self._client = AsyncAzureOpenAI(
            api_key=api_key,
            azure_endpoint=self._endpoint,
            api_version=self._api_version,
        )


Expand Down
13 changes: 9 additions & 4 deletions src/dailyai/services/base_transport_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,10 +151,9 @@ async def run(self, pipeline:Pipeline | None=None, override_pipeline_source_queu

pipeline_task = None
if pipeline:
pipeline.set_sink(self.send_queue)
if override_pipeline_source_queue:
pipeline.set_source(self.receive_queue)
pipeline_task = asyncio.create_task(pipeline.run_pipeline())
pipeline_task = asyncio.create_task(
self.run_pipeline(pipeline, override_pipeline_source_queue)
)

try:
while time.time() < self._expiration and not self._stop_threads.is_set():
Expand Down Expand Up @@ -182,6 +181,12 @@ async def run(self, pipeline:Pipeline | None=None, override_pipeline_source_queu
if self._vad_enabled:
self._vad_thread.join()

async def run_pipeline(self, pipeline:Pipeline, override_pipeline_source_queue=True):
    """Wire this transport's queues into *pipeline*, then run it to completion.

    The pipeline's sink is always the transport's send queue; the source is
    pointed at the receive queue unless the caller opted out via
    ``override_pipeline_source_queue=False``.
    """
    if override_pipeline_source_queue:
        pipeline.set_source(self.receive_queue)
    pipeline.set_sink(self.send_queue)
    await pipeline.run_pipeline()

async def run_interruptible_pipeline(
self,
pipeline: Pipeline,
Expand Down
3 changes: 3 additions & 0 deletions src/dailyai/services/openai_api_llm_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,9 @@ class BaseOpenAILLMService(LLMService):
def __init__(self, model: str, api_key=None, base_url=None):
    """Store the model name and build the async OpenAI client.

    Client construction goes through ``create_client`` so subclasses
    (e.g. an Azure variant) can swap in a different client implementation.
    """
    super().__init__()
    self._model: str = model
    self.create_client(api_key=api_key, base_url=base_url)

def create_client(self, api_key=None, base_url=None):
    """Instantiate the async OpenAI client; subclasses may override."""
    client = AsyncOpenAI(api_key=api_key, base_url=base_url)
    self._client = client

async def _stream_chat_completions(
Expand Down
7 changes: 1 addition & 6 deletions src/examples/starter-apps/chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,25 +6,20 @@
from typing import AsyncGenerator

from dailyai.pipeline.aggregators import (
LLMAssistantContextAggregator,
LLMResponseAggregator,
LLMUserContextAggregator,
UserResponseAggregator,
)
from dailyai.pipeline.frames import (
ImageFrame,
SpriteFrame,
Frame,
LLMResponseEndFrame,
LLMResponseStartFrame,
LLMMessagesQueueFrame,
UserStartedSpeakingFrame,
AudioFrame,
PipelineStartedFrame,
)
from dailyai.services.ai_services import AIService
from dailyai.pipeline.pipeline import Pipeline
from dailyai.services.ai_services import FrameLogger
from dailyai.services.daily_transport_service import DailyTransportService
from dailyai.services.open_ai_services import OpenAILLMService
from dailyai.services.elevenlabs_ai_service import ElevenLabsTTSService
Expand Down Expand Up @@ -130,7 +125,7 @@ async def main(room_url: str, token):
@transport.event_handler("on_first_other_participant_joined")
async def on_first_other_participant_joined(transport):
    # NOTE(review): leftover debug print — consider removing before release.
    print(f"!!! in here, pipeline.source is {pipeline.source}")
    # queue_frames expects a list of frames, so wrap the single frame.
    await pipeline.queue_frames([LLMMessagesQueueFrame(messages)])

async def run_conversation():

Expand Down
33 changes: 17 additions & 16 deletions src/examples/starter-apps/storybot.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
)
from examples.support.runner import configure
from dailyai.pipeline.frames import (
EndPipeFrame,
LLMMessagesQueueFrame,
TranscriptionQueueFrame,
Frame,
Expand Down Expand Up @@ -187,10 +188,6 @@ async def process_frame(self, frame: Frame) -> AsyncGenerator[Frame, None]:

async def main(room_url: str, token):
async with aiohttp.ClientSession() as session:
global transport
global llm
global tts

messages = [
{
"role": "system",
Expand Down Expand Up @@ -235,8 +232,15 @@ async def main(room_url: str, token):
vad_stop_s=1.5,
)

start_story_event = asyncio.Event()

@transport.event_handler("on_first_other_participant_joined")
async def on_first_other_participant_joined(transport):
start_story_event.set()

async def storytime():
await start_story_event.wait()

# We're being a bit tricky here by using a special system prompt to
# ask the user for a story topic. After their initial response, we'll
# use a different system prompt to create story pages.
Expand All @@ -247,20 +251,17 @@ async def on_first_other_participant_joined(transport):
}
]
lca = LLMAssistantContextAggregator(messages)
await tts.run_to_queue(
transport.send_queue,
lca.run(
llm.run(
[
ImageFrame(None, images["grandma-listening.png"]),
LLMMessagesQueueFrame(intro_messages),
AudioFrame(sounds["listening.wav"]),
]
),
),
local_pipeline = Pipeline([llm, lca, tts], sink=transport.send_queue)
await local_pipeline.queue_frames(
[
ImageFrame(None, images["grandma-listening.png"]),
LLMMessagesQueueFrame(intro_messages),
AudioFrame(sounds["listening.wav"]),
EndPipeFrame(),
]
)
await local_pipeline.run_pipeline()

async def storytime():
fl = FrameLogger("### After Image Generation")
pipeline = Pipeline(
processors=[
Expand Down

0 comments on commit 18bf26d

Please sign in to comment.