Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

LLM: Add image recognition and generation support #89

Merged
merged 18 commits into from
Apr 29, 2024
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions app/domain/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@
from ..domain.tutor_chat.tutor_chat_pipeline_execution_dto import (
TutorChatPipelineExecutionDTO,
yassinsws marked this conversation as resolved.
Show resolved Hide resolved
)
from .pyris_image import PyrisImage
from .iris_message import IrisMessage, IrisMessageRole
4 changes: 3 additions & 1 deletion app/domain/iris_message.py
kaancayli marked this conversation as resolved.
Show resolved Hide resolved
yassinsws marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from enum import Enum

from pydantic import BaseModel
from typing import List, Optional
from .pyris_image import PyrisImage


class IrisMessageRole(str, Enum):
Expand All @@ -12,6 +13,7 @@ class IrisMessageRole(str, Enum):
class IrisMessage(BaseModel):
    """A single chat message: a role, the message text, and optional images."""

    # Message body; defaults to empty (e.g. for image-only messages).
    text: str = ""
    role: IrisMessageRole
    # Images attached to this message, if any.
    images: Optional[List[PyrisImage]] = None

    def __str__(self):
        # IrisMessageRole subclasses str, so .lower() lowercases the role value.
        return f"{self.role.lower()}: {self.text}"
17 changes: 17 additions & 0 deletions app/domain/pyris_image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from pydantic import BaseModel
from typing import Optional


class PyrisImage(BaseModel):
    """An image exchanged with Pyris: base64 payload plus optional metadata."""

    # Raw image bytes, base64-encoded (no data-URI prefix).
    base64: str
    # Prompt the image was generated from, if any.
    prompt: Optional[str] = None
    # Image subtype used when building data URIs (data:image/<mime_type>;...).
    # NOTE(review): "jpeg" is only the subtype, not a full MIME type such as
    # "image/jpeg" — confirm all consumers expect it this way.
    mime_type: Optional[str] = "jpeg"

    class Config:
        # NOTE(review): `schema_extra` is the pydantic v1 config key; pydantic
        # v2 renamed it to `json_schema_extra` — confirm the project's version.
        schema_extra = {
            "example": {
                "prompt": "Example prompt",
                "base64": "base64EncodedString==",
                "mime_type": "jpeg",
            }
        }
30 changes: 28 additions & 2 deletions app/llm/external/model.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from abc import ABCMeta, abstractmethod
from pydantic import BaseModel

from ...domain import IrisMessage
from ...domain import IrisMessage, PyrisImage
from ...llm import CompletionArguments
from ...llm.capability import CapabilityList

Expand All @@ -23,7 +23,9 @@ def __subclasshook__(cls, subclass) -> bool:
return hasattr(subclass, "complete") and callable(subclass.complete)

@abstractmethod
def complete(
    self,
    prompt: str,
    arguments: CompletionArguments,
    # A plain optional list, not the list literal `[PyrisImage]`, which is not
    # a valid type annotation.
    images: list[PyrisImage] | None = None,
) -> str:
    """Create a completion from the prompt.

    :param prompt: text prompt to complete
    :param arguments: completion settings forwarded to the concrete model
    :param images: optional images to condition the completion on
    :raises NotImplementedError: if this model has no completion support
    """
    raise NotImplementedError(
        f"The LLM {self.__str__()} does not support completion"
    )
Expand Down Expand Up @@ -60,3 +62,27 @@ def embed(self, text: str) -> list[float]:
raise NotImplementedError(
f"The LLM {self.__str__()} does not support embeddings"
)


class ImageGenerationModel(LanguageModel, metaclass=ABCMeta):
    """Abstract base for LLM image generation wrappers."""

    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural check: anything exposing a callable `generate_images`
        # attribute counts as an image generation model.
        candidate = getattr(subclass, "generate_images", None)
        return callable(candidate)

    @abstractmethod
    def generate_images(
        self,
        prompt: str,
        n: int = 1,
        size: str = "256x256",
        quality: str = "standard",
        **kwargs,
    ) -> list:
        """Create an image from the prompt"""
        raise NotImplementedError(
            f"The LLM {self.__str__()} does not support image generation"
        )
24 changes: 20 additions & 4 deletions app/llm/external/ollama.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,27 @@
import base64
from typing import Literal, Any

from ollama import Client, Message

from ...domain import IrisMessage, IrisMessageRole
from ...domain import IrisMessage, IrisMessageRole, PyrisImage
from ...llm import CompletionArguments
from ...llm.external.model import ChatModel, CompletionModel, EmbeddingModel


def convert_to_ollama_images(images: list[PyrisImage]) -> list[bytes] | None:
    """Decode each image's base64 payload into raw bytes.

    Returns None (not an empty list) when `images` is falsy, i.e. None or
    empty.
    """
    return [base64.b64decode(img.base64) for img in images] if images else None


def convert_to_ollama_messages(messages: list[IrisMessage]) -> list[Message]:
    """Map IrisMessages onto ollama Message objects (text plus decoded images)."""
    return [
        Message(
            role=msg.role.value,
            content=msg.text,
            images=convert_to_ollama_images(msg.images),
        )
        for msg in messages
    ]


Expand All @@ -30,8 +42,12 @@ class OllamaModel(
def model_post_init(self, __context: Any) -> None:
    # Build the ollama client once the pydantic model is fully initialized.
    self._client = Client(host=self.host)  # TODO: Add authentication (httpx auth?)

def complete(
    self,
    prompt: str,
    arguments: CompletionArguments,
    # A plain optional list, not the list literal `[PyrisImage]`, which is not
    # a valid type annotation.
    images: list[PyrisImage] | None = None,
) -> str:
    """Create a completion via the ollama generate endpoint.

    NOTE(review): `arguments` (temperature, max_tokens, ...) is not forwarded
    to the client here — confirm whether that is intentional.
    """
    response = self._client.generate(
        model=self.model, prompt=prompt, images=convert_to_ollama_images(images)
    )
    return response["response"]

def chat(
Expand Down
30 changes: 24 additions & 6 deletions app/llm/external/openai_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from openai import OpenAI
from openai.lib.azure import AzureOpenAI
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessage
from openai.types.chat import ChatCompletionMessage

from ...domain import IrisMessage, IrisMessageRole
from ...llm import CompletionArguments
Expand All @@ -11,10 +11,29 @@

def convert_to_open_ai_messages(
    messages: list[IrisMessage],
) -> list[dict[str, Any]]:
    """Convert IrisMessages to OpenAI chat messages.

    Every message gets a content list whose first part is the text; one
    image_url part per attached image follows (as a base64 data URI).

    NOTE(review): ensure `Any` is imported from `typing` at the top of this
    file — the previous annotation used `ChatCompletionMessageParam` instead.
    """
    openai_messages = []
    for message in messages:
        # The text part is identical with or without images, so build it once
        # instead of duplicating it in both branches.
        content: list[dict[str, Any]] = [{"type": "text", "text": message.text}]
        for image in message.images or []:
            content.append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/{image.mime_type};base64,{image.base64}",
                        "detail": "high",
                    },
                }
            )
        openai_messages.append({"role": message.role.value, "content": content})
    return openai_messages


def convert_to_iris_message(message: ChatCompletionMessage) -> IrisMessage:
Expand All @@ -36,7 +55,6 @@ def chat(
messages=convert_to_open_ai_messages(messages),
temperature=arguments.temperature,
max_tokens=arguments.max_tokens,
stop=arguments.stop,
)
return convert_to_iris_message(response.choices[0].message)

Expand Down
5 changes: 4 additions & 1 deletion app/llm/external/openai_completion.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from openai import OpenAI
from openai.lib.azure import AzureOpenAI

from ...domain import PyrisImage
from ...llm import CompletionArguments
from ...llm.external.model import CompletionModel

Expand All @@ -11,7 +12,9 @@ class OpenAICompletionModel(CompletionModel):
api_key: str
_client: OpenAI

def complete(self, prompt: str, arguments: CompletionArguments) -> any:
def complete(
self, prompt: str, arguments: CompletionArguments, images: [PyrisImage] = None
) -> any:
yassinsws marked this conversation as resolved.
Show resolved Hide resolved
response = self._client.completions.create(
model=self.model,
prompt=prompt,
Expand Down
60 changes: 60 additions & 0 deletions app/llm/external/openai_dalle.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
import base64
from datetime import datetime
from typing import Literal, Any

import requests
from openai import OpenAI

from ...domain.pyris_image import PyrisImage
from ...llm.external.model import ImageGenerationModel


class OpenAIDalleWrapper(ImageGenerationModel):
    """Image generation wrapper around the OpenAI DALL-E images API."""

    type: Literal["openai_dalle"]
    model: str
    # Was missing: model_post_init reads self.api_key, which raises
    # AttributeError on a pydantic model unless the field is declared
    # (sibling OpenAICompletionModel declares it the same way).
    api_key: str
    _client: OpenAI

    def model_post_init(self, __context: Any) -> None:
        # Build the OpenAI client once the pydantic model is fully initialized.
        self._client = OpenAI(api_key=self.api_key)

    def generate_images(
        self,
        prompt: str,
        n: int = 1,
        size: Literal[
            "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"
        ] = "256x256",
        quality: Literal["standard", "hd"] = "standard",
        **kwargs,
    ) -> list[PyrisImage]:
        """Generate `n` images for `prompt` and return them as PyrisImages.

        Images are requested as URLs; any image without inline base64 data is
        downloaded and encoded here so callers always get a base64 payload.
        """
        response = self._client.images.generate(
            model=self.model,
            prompt=prompt,
            size=size,
            quality=quality,
            n=n,
            response_format="url",
            **kwargs,
        )

        iris_images = []
        for image in response.data:
            # The API may revise the prompt; fall back to the user's prompt.
            if image.revised_prompt is None:
                image.revised_prompt = prompt
            if image.b64_json is None:
                image_response = requests.get(image.url, timeout=30)
                # Fail loudly instead of base64-encoding an error page.
                image_response.raise_for_status()
                image.b64_json = base64.b64encode(image_response.content).decode(
                    "utf-8"
                )

            # PyrisImage only defines prompt/base64/mime_type; the previous
            # `timestamp`/`raw_data` kwargs were silently dropped by pydantic.
            iris_images.append(
                PyrisImage(
                    prompt=image.revised_prompt,
                    base64=image.b64_json,
                )
            )

        return iris_images
Loading