
Commit

intro to gen ai, llm
ariefrahmansyah committed Aug 28, 2024
1 parent 211642b commit d1d8e01
Showing 17 changed files with 2,690 additions and 24,464 deletions.
52 changes: 52 additions & 0 deletions code/llm/llama3.1_local_rag/llama3.1_local_rag.py
@@ -0,0 +1,52 @@
import ollama
import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma

st.title("Chat with Webpage 🌐")
st.caption("This app allows you to chat with a webpage using local llama3 and RAG")

# Get the webpage URL from the user
webpage_url = st.text_input("Enter Webpage URL", type="default")

if webpage_url:
    # 1. Load the data
    loader = WebBaseLoader(webpage_url)
    docs = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=10)
    splits = text_splitter.split_documents(docs)

    # 2. Create Ollama embeddings and vector store
    embeddings = OllamaEmbeddings(model="llama3.1")
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)

    # 3. Call Ollama Llama3 model
    def ollama_llm(question, context):
        formatted_prompt = f"Question: {question}\n\nContext: {context}"
        response = ollama.chat(
            model="llama3.1", messages=[{"role": "user", "content": formatted_prompt}]
        )
        return response["message"]["content"]

    # 4. RAG Setup
    retriever = vectorstore.as_retriever()

    def combine_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    def rag_chain(question):
        retrieved_docs = retriever.invoke(question)
        formatted_context = combine_docs(retrieved_docs)
        return ollama_llm(question, formatted_context)

    st.success(f"Loaded {webpage_url} successfully!")

    # Ask a question about the webpage
    prompt = st.text_input("Ask any question about the webpage")

    # Chat with the webpage
    if prompt:
        result = rag_chain(prompt)
        st.write(result)
11 changes: 11 additions & 0 deletions code/llm/ollama/assistant.py
@@ -0,0 +1,11 @@
from phi.assistant import Assistant
from phi.llm.ollama import Ollama
from rich.pretty import pprint

assistant = Assistant(
    llm=Ollama(model="llama3.1"),
    description="You help people with their health and fitness goals.",
)
assistant.print_response("Share a quick healthy breakfast recipe.", markdown=True)
print("\n-*- Metrics:")
pprint(assistant.llm.metrics) # type: ignore
7 changes: 7 additions & 0 deletions code/llm/ollama/embedding.py
@@ -0,0 +1,7 @@
from phi.embedder.ollama import OllamaEmbedder

embedder = OllamaEmbedder(model="llama3.1")
embeddings = embedder.get_embedding("Embed me")

print(f"Embeddings: {embeddings}")
print(f"Dimensions: {len(embeddings)}")
10 changes: 10 additions & 0 deletions code/llm/openai/assistant.py
@@ -0,0 +1,10 @@
from phi.assistant import Assistant
from phi.llm.openai import OpenAIChat
from phi.tools.duckduckgo import DuckDuckGo

assistant = Assistant(
    llm=OpenAIChat(model="gpt-4o", max_tokens=500, temperature=0.3),
    tools=[DuckDuckGo()],
    show_tool_calls=True,
)
assistant.print_response("What's happening in Indonesia?", markdown=True)
10 changes: 10 additions & 0 deletions code/llm/openrouter/assistant.py
@@ -0,0 +1,10 @@
from phi.assistant import Assistant
from phi.llm.openrouter import OpenRouter

assistant = Assistant(
    llm=OpenRouter(model="mistralai/mistral-7b-instruct:free"),
    description="You help people with their health and fitness goals.",
)
assistant.print_response(
    "Share a 2 sentence quick and healthy breakfast recipe.", markdown=True
)
34 changes: 34 additions & 0 deletions code/llm/openrouter/pydantic_output.py
@@ -0,0 +1,34 @@
from typing import List

from phi.assistant import Assistant
from phi.llm.openrouter import OpenRouter
from pydantic import BaseModel, Field
from rich.pretty import pprint


class MovieScript(BaseModel):
    setting: str = Field(
        ..., description="Provide a nice setting for a blockbuster movie."
    )
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(
        ..., description="3 sentence storyline for the movie. Make it exciting!"
    )


movie_assistant = Assistant(
    llm=OpenRouter(model="mistralai/mistral-7b-instruct:free"),
    description="You help people write movie ideas.",
    output_model=MovieScript,
)

pprint(movie_assistant.run("New York"))
6 changes: 4 additions & 2 deletions docs/_toc.yml
@@ -113,9 +113,11 @@ parts:
       - file: deep_learning/keras_tuner
       - file: deep_learning/transfer_learning
 
-  - caption: LLM
+  - caption: Generative AI
     chapters:
-      - file: llm/intro_to_llm
+      - file: gen_ai/intro_to_gen_ai
+      - file: gen_ai/intro_to_llm
+      - file: gen_ai/local_llms
 
   - caption: Reference
     chapters:
