diff --git a/tutorials/llm/4_structured_output.py b/tutorials/llm/4_structured_output.py
deleted file mode 100644
index ca07aff2a..000000000
--- a/tutorials/llm/4_structured_output.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# %% [markdown]
-"""
-# LLM: 4. Structured Output
-
-Sometimes we want to output structured data, such as a valid JSON object, or
-to automatically fill particular fields in the output Message.
-In Chatsky we can do that using Structured Output.
-"""
-
-# %pip install chatsky[llm]
-# %%
-import os
-from chatsky import (
-    TRANSITIONS,
-    RESPONSE,
-    GLOBAL,
-    Pipeline,
-    Transition as Tr,
-    conditions as cnd,
-    destinations as dst,
-)
-from langchain_openai import ChatOpenAI
-from langchain_anthropic import ChatAnthropic
-from chatsky.core.message import Message
-from chatsky.utils.testing import is_interactive_mode
-from chatsky.llm import LLM_API
-from chatsky.responses.llm import LLMResponse
-
-
-from langchain_core.pydantic_v1 import BaseModel, Field
-
-
-os.environ["OPENAI_API_KEY"] = ""
-os.environ["ANTHROPIC_API_KEY"] = ""
-
-
-# %% [markdown]
-"""
-In this tutorial we will define two models: one for movies, one for notes.
-"""
-# %%
-assistant_model = LLM_API(ChatOpenAI(model="gpt-4o-mini"))
-movie_model = LLM_API(ChatAnthropic(model="claude-3-opus-20240229"))
-
-# %% [markdown]
-"""
-For the structured output we will use two classes to show two possible ways of
-using `message_schema` in responses.
-The `Movie` class, inherited from `BaseModel`, will act as a schema for the
-response _text_, which will contain valid JSON with the described information.
-The `ImportantMessage` class, inherited from `Message`, will instead
-define the fields of the output `Message`. In this example we will use this
-to mark the message as important.
-"""
-
-
-# %%
-class Movie(BaseModel):
-    name: str = Field(description="Name of the movie")
-    genre: str = Field(description="Genre of the movie")
-    plot: str = Field(description="Plot of the movie in chapters")
-    cast: list = Field(description="List of the actors")
-
-
-class ImportantMessage(Message):
-    text: str = Field(description="Text of the note")
-    misc: dict = Field(
-        description="A dictionary with 'important' "
-        "key and true/false value in it"
-    )
-
-
-# %%
-
-script = {
-    GLOBAL: {
-        TRANSITIONS: [
-            Tr(
-                dst=("greeting_flow", "start_node"),
-                cnd=cnd.ExactMatch("/start"),
-            ),
-            Tr(dst=("movie_flow", "main_node"), cnd=cnd.ExactMatch("/movie")),
-            Tr(dst=("note_flow", "main_node"), cnd=cnd.ExactMatch("/note")),
-        ]
-    },
-    "greeting_flow": {
-        "start_node": {
-            RESPONSE: Message(),
-        },
-        "fallback_node": {
-            RESPONSE: Message("I did not quite understand you..."),
-            TRANSITIONS: [Tr(dst="start_node")],
-        },
-    },
-    "movie_flow": {
-        "main_node": {
-            RESPONSE: LLMResponse(
-                "movie_model",
-                prompt="Ask the user to request movie ideas from you.",
-                message_schema=Movie,
-            ),
-            TRANSITIONS: [Tr(dst=dst.Current())],
-        }
-    },
-    "note_flow": {
-        "main_node": {
-            RESPONSE: LLMResponse(
-                "note_model",
-                prompt="Help the user take notes and mark the important ones.",
-                message_schema=ImportantMessage,
-            ),
-            TRANSITIONS: [Tr(dst=dst.Current())],
-        }
-    },
-}
-
-# %%
-pipeline = Pipeline(
-    script=script,
-    start_label=("greeting_flow", "start_node"),
-    fallback_label=("greeting_flow", "fallback_node"),
-    models={"movie_model": movie_model, "note_model": assistant_model},
-)
-
-if __name__ == "__main__":
-    # This runs the tutorial in interactive mode if not in an IPython env
-    # and if `DISABLE_INTERACTIVE_MODE` is not set
-    if is_interactive_mode():
-        pipeline.run()  # This runs the tutorial in interactive mode
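For reference, a minimal standalone sketch of the structured-output pattern the tutorial above demonstrates, assuming only `langchain-openai` and plain Pydantic. The direct `with_structured_output()` call here stands in for Chatsky's `message_schema` machinery; it is LangChain's generic mechanism, not the Chatsky API itself.

# A hedged, Chatsky-free sketch: the same Movie schema, filled directly
# by the chat model via LangChain's structured-output support.
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI


class Movie(BaseModel):
    name: str = Field(description="Name of the movie")
    genre: str = Field(description="Genre of the movie")
    plot: str = Field(description="Plot of the movie in chapters")
    cast: list = Field(description="List of the actors")


# with_structured_output() constrains the model output to the Movie schema.
structured_model = ChatOpenAI(model="gpt-4o-mini").with_structured_output(Movie)
movie = structured_model.invoke("Describe a classic sci-fi movie.")
print(movie.model_dump_json(indent=2))  # valid JSON with the fields above

Running this prints a JSON object whose keys match the `Movie` fields, which is what the tutorial's `message_schema=Movie` produces as the response text.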
diff --git a/tutorials/llm/5_llm_slots.py b/tutorials/llm/5_llm_slots.py
deleted file mode 100644
index fa3f80bea..000000000
--- a/tutorials/llm/5_llm_slots.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# %% [markdown]
-"""
-# LLM: 5. LLM Slots
-
-If we want to retrieve some information from user input, like a name, address
-or email, we can simply use Chatsky's Slot system with regexes or other
-formally specified data retrieval techniques. But if the data is harder to
-extract or is not explicitly present in the utterance, we
-encourage you to utilize Chatsky LLM Slots.
-In this tutorial we will see how to set up Slots that use LLMs under
-the hood to extract more obscure information from user input.
-"""
-# %pip install chatsky[llm]
-# %%
-import os
-from chatsky import (
-    RESPONSE,
-    TRANSITIONS,
-    PRE_TRANSITION,
-    GLOBAL,
-    LOCAL,
-    Pipeline,
-    Transition as Tr,
-    conditions as cnd,
-    processing as proc,
-    responses as rsp,
-)
-from langchain_openai import ChatOpenAI
-
-from chatsky.utils.testing import (
-    is_interactive_mode,
-)
-from chatsky.slots.llm import LLMSlot, LLMGroupSlot
-
-
-os.environ["OPENAI_API_KEY"] = ""
-
-
-# %% [markdown]
-"""
-In this example we define an LLM Group Slot with two LLM Slots in it.
-Both of them can be used separately just like regular slots,
-but if you are going to extract several LLM Slots simultaneously,
-we encourage you to put them in an LLM Group Slot for optimization and convenience.
-
-In the `LLMSlot.caption` parameter you should put a description of the data
-you want to retrieve. More specific descriptions will yield better results,
-especially when using smaller models.
-
-Note that here we are using `langchain_openai.ChatOpenAI` directly and
-not `chatsky.llm.LLM_API`.
-"""
-
-# %%
-model = ChatOpenAI(model="gpt-4o-mini")
-
-SLOTS = {
-    "person": LLMGroupSlot(
-        username=LLMSlot(caption="User's username in uppercase"),
-        job=LLMSlot(caption="User's occupation, job, profession"),
-        model=model,
-    )
-}
-
-script = {
-    GLOBAL: {
-        TRANSITIONS: [
-            Tr(dst=("user_flow", "ask"), cnd=cnd.Regexp(r"^[sS]tart"))
-        ]
-    },
-    "user_flow": {
-        LOCAL: {
-            PRE_TRANSITION: {"get_slot": proc.Extract("person")},
-            TRANSITIONS: [
-                Tr(
-                    dst=("user_flow", "tell"),
-                    cnd=cnd.SlotsExtracted("person"),
-                    priority=1.2,
-                ),
-                Tr(dst=("user_flow", "repeat_question"), priority=0.8),
-            ],
-        },
-        "start": {RESPONSE: "", TRANSITIONS: [Tr(dst=("user_flow", "ask"))]},
-        "ask": {
-            RESPONSE: "Hello! Tell me about yourself: what do you do for "
-            "a living, and what are your hobbies? "
-            "And don't forget to introduce yourself!",
-        },
-        "tell": {
-            RESPONSE: rsp.FilledTemplate(
-                "So you are {person.username} and your "
-                "occupation is {person.job}, right?"
-            ),
-            TRANSITIONS: [Tr(dst=("user_flow", "ask"))],
-        },
-        "repeat_question": {
-            RESPONSE: "I didn't quite understand you...",
-        },
-    },
-}
-
-pipeline = Pipeline(
-    script=script,
-    start_label=("user_flow", "start"),
-    fallback_label=("user_flow", "repeat_question"),
-    slots=SLOTS,
-)
-
-
-if __name__ == "__main__":
-    if is_interactive_mode():
-        pipeline.run()
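Similarly, a rough sketch of what the `LLMSlot` captions above amount to under the hood: each caption becomes a field description in a schema that the model fills in a single call, which is also why grouping related slots into an `LLMGroupSlot` is cheaper than extracting them one by one. `PersonSlots` and `extract_person` are hypothetical names used for this illustration, not Chatsky identifiers.

# Hypothetical illustration of grouped LLM slot extraction, assuming
# langchain-openai and Pydantic; the captions above are reused verbatim
# as field descriptions.
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI


class PersonSlots(BaseModel):
    username: str = Field(description="User's username in uppercase")
    job: str = Field(description="User's occupation, job, profession")


extractor = ChatOpenAI(model="gpt-4o-mini").with_structured_output(PersonSlots)


def extract_person(utterance: str) -> PersonSlots:
    # One structured-output call extracts every field of the group at once.
    return extractor.invoke(
        "Extract the described fields from this user message:\n" + utterance
    )


# e.g. extract_person("Hi, I'm BOB2000 and I work as a firefighter.")
# -> PersonSlots(username="BOB2000", job="firefighter")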