Skip to content

Commit

Permalink
Adjust folder structure for HACS
Browse files Browse the repository at this point in the history
  • Loading branch information
the-smart-home-maker committed Jul 12, 2024
1 parent dc477f3 commit 2cf33df
Show file tree
Hide file tree
Showing 10 changed files with 263 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
__pycache__
2 changes: 2 additions & 0 deletions custom_components/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# hass_ollama_image_analysis
Image analysis with Ollama (AI models) from within Home Assistant
91 changes: 91 additions & 0 deletions custom_components/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
"""Ollama (local AI models) image analysis integration"""

import voluptuous as vol

from homeassistant.core import (
Config,
HomeAssistant,
ServiceCall,
SupportsResponse,
ServiceResponse,
)

from homeassistant.config_entries import ConfigEntry

from ollama import AsyncClient

import pybase64
import aiofiles

# NOTE(review): duplicates .const.DOMAIN — consider importing from .const
# so the two definitions cannot drift apart.
DOMAIN = "ollama_image_analysis"

OLLAMA_IMAGE_ANALYSIS_SERVICE_NAME = "ollama_image_analysis"
# Schema for the ollama_image_analysis service call.  "model" is declared
# optional with a default here because services.yaml marks it
# `required: false` (default: llava) and the handler falls back to "llava";
# vol.Required would reject any call that relies on that default.
OLLAMA_IMAGE_ANALYSIS_SCHEMA = vol.Schema(
    {
        vol.Required("prompt"): str,
        vol.Optional("model", default="llava"): str,
        vol.Required("image_path"): str,
    }
)


async def read_binary_file(file_name):
    """Read *file_name* and return its raw bytes, or None on failure.

    Errors are reported to stdout and swallowed; callers must handle a
    None return.
    """
    try:
        # "rb": camera snapshots are binary (JPEG/PNG).  Opening in text
        # mode ("r") would try to UTF-8-decode the image and raise
        # UnicodeDecodeError for virtually every real image file.
        async with aiofiles.open(file_name, "rb") as f:
            return await f.read()
    except FileNotFoundError:
        print(f"The file {file_name} was not found.")
    except Exception as e:
        print(f"An error occurred: {e}")


async def async_setup_entry(hass: HomeAssistant, config: ConfigEntry) -> bool:
    """Set up the config entry and register the image-analysis service."""

    async def ollama_image_analysis(call: ServiceCall) -> ServiceResponse:
        """Send a camera snapshot to Ollama and return the model's answer."""
        config_dict = config.as_dict()

        # load the call parameters
        prompt = call.data["prompt"]
        image_path = call.data["image_path"]
        # "model" is optional in services.yaml; fall back to llava.
        model = call.data.get("model", "llava")

        # Ollama server URL captured during the config flow.
        host = config_dict["data"]["host"]

        client = AsyncClient(host=host)

        # TODO(review): hard-coded dev-container prefix — this only works
        # inside the author's workspace and should come from configuration.
        binary_image = await read_binary_file("/workspaces/hass_core" + image_path)

        # make the call to ollama.  The chat API takes attachments under the
        # plural "images" key as a list (the client base64-encodes raw bytes
        # itself); a singular "image" key is silently ignored.
        response = await client.chat(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                    "images": [binary_image],
                },
            ],
        )

        logging.getLogger(__name__).debug(
            "Model response: %s", response["message"]["content"]
        )

        return {
            "items": [
                {
                    "result": response["message"]["content"],
                }
            ],
        }

    hass.services.async_register(
        DOMAIN,
        OLLAMA_IMAGE_ANALYSIS_SERVICE_NAME,
        ollama_image_analysis,
        schema=OLLAMA_IMAGE_ANALYSIS_SCHEMA,
        supports_response=SupportsResponse.ONLY,
    )

    return True
74 changes: 74 additions & 0 deletions custom_components/config_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
"""Config flow for Ollama (local AI models) image analysis integration."""

from __future__ import annotations

import logging
from typing import Any
import re

import voluptuous as vol

from homeassistant.config_entries import ConfigFlow, ConfigFlowResult
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant, HomeAssistantError

from .const import DOMAIN

_LOGGER = logging.getLogger(__name__)

# Form schema for the single "user" step: only the Ollama host URL is asked.
STEP_USER_DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str})


async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, Any]:
    """Validate the user input allows us to connect.

    Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.

    Raises InvalidUrlScheme when the host is not an http(s) URL; returns the
    info to store in the config entry otherwise.  No network connection is
    attempted here — only the URL shape is checked.
    """

    # Require an explicit http:// or https:// scheme.  The previous pattern
    # ("http.*://") also matched garbage schemes such as "httpfoo://", and
    # used ".?" where the host:port separator must be a colon.
    pattern = r"(?:https?://)(?P<host>[^:/ ]+)(?::(?P<port>[0-9]+))?"

    if not re.match(pattern, data[CONF_HOST]):
        raise InvalidUrlScheme

    # Return info that you want to store in the config entry.
    return {"host": data[CONF_HOST]}


class ConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Ollama (local AI models) image analysis."""

    VERSION = 1

    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Handle the initial step."""
        errors: dict[str, str] = {}

        if user_input is None:
            # First visit: present an empty form.
            return self.async_show_form(
                step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
            )

        try:
            await validate_input(self.hass, user_input)
        except InvalidUrlScheme:
            errors["base"] = "invalid_url_scheme"
        except Exception:
            _LOGGER.exception("Unexpected exception")
            errors["base"] = "unknown"
        else:
            # Validation succeeded: persist the host in a new entry.
            return self.async_create_entry(
                title="Ollama (local AI models) image analysis", data=user_input
            )

        # Validation failed: re-show the form with the error banner.
        return self.async_show_form(
            step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
        )


class InvalidUrlScheme(HomeAssistantError):
    """Error to indicate the configured host URL has an invalid scheme."""
3 changes: 3 additions & 0 deletions custom_components/const.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
"""Constants for the Ollama (local AI models) image analysis integration."""

# Integration domain; must match the "domain" field in manifest.json.
DOMAIN = "ollama_image_analysis"
5 changes: 5 additions & 0 deletions custom_components/icons.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"services": {
"ollama_image_analysis": "mdi:image-auto-adjust"
}
}
23 changes: 23 additions & 0 deletions custom_components/manifest.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
{
"domain": "ollama_image_analysis",
"name": "Ollama (local AI models) image analysis",
"version": "0.1.0",
"codeowners": [
"@the-smart-home-maker"
],
"config_flow": true,
"single_config_entry": true,
"dependencies": [],
"documentation": "https://github.com/the-smart-home-maker/hass_ollama_image_analysis",
"issue_tracker": "https://github.com/the-smart-home-maker/hass_ollama_image_analysis/issues",
"homekit": {},
"iot_class": "local_polling",
"integration_type": "service",
"requirements": [
"ollama==0.2.1",
"pybase64==1.3.2",
"aiofiles"
],
"ssdp": [],
"zeroconf": []
}
25 changes: 25 additions & 0 deletions custom_components/services.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
ollama_image_analysis:
name: Analyze camera image with local AI based upon Ollama
description: Analyze a camera image with a local AI model (e.g. llava) based upon Ollama
fields:
prompt:
description: The prompt describing how to analyze the image
example: |
You are a security agent and analyze the following camera image. Your focus is to identify irregularities. Please list all irregularities.
required: true
selector:
text:
multiline: true
image_path:
description: This should be the path to a snapshot image taken from your camera which shall be analyzed.
example: /x/y/camera_image.jpg
required: true
selector:
text:
model:
description: Choose which model to use. Defaults to llava.
example: llava
default: llava
required: false
selector:
text:
21 changes: 21 additions & 0 deletions custom_components/strings.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
{
"config": {
"step": {
"user": {
"data": {
"host": "[%key:common::config_flow::data::host%]",
"username": "[%key:common::config_flow::data::username%]",
"password": "[%key:common::config_flow::data::password%]"
}
}
},
  "error": {
    "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
    "invalid_auth": "[%key:common::config_flow::error::invalid_auth%]",
    "invalid_url_scheme": "Invalid URL scheme",
    "unknown": "[%key:common::config_flow::error::unknown%]"
  },
"abort": {
"already_configured": "[%key:common::config_flow::abort::already_configured_device%]"
}
}
}
18 changes: 18 additions & 0 deletions custom_components/translations/en.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
{
"config": {
"abort": {
"already_configured": "Device is already configured"
},
"error": {
"unknown": "Unexpected error",
"invalid_url_scheme": "Invalid URL scheme"
},
"step": {
"user": {
"data": {
"host": "Host"
}
}
}
}
}

0 comments on commit 2cf33df

Please sign in to comment.