From 2cf33df43362b6911ce2b844b6c10f55d16dff10 Mon Sep 17 00:00:00 2001 From: Daniel Trumpp Date: Fri, 12 Jul 2024 04:09:52 +0000 Subject: [PATCH] Adjust folder structure for HACS --- .gitignore | 1 + custom_components/README.md | 2 + custom_components/__init__.py | 91 ++++++++++++++++++++++++++ custom_components/config_flow.py | 74 +++++++++++++++++++++ custom_components/const.py | 3 + custom_components/icons.json | 5 ++ custom_components/manifest.json | 23 +++++++ custom_components/services.yaml | 25 +++++++ custom_components/strings.json | 21 ++++++ custom_components/translations/en.json | 18 +++++ 10 files changed, 263 insertions(+) create mode 100644 .gitignore create mode 100644 custom_components/README.md create mode 100644 custom_components/__init__.py create mode 100644 custom_components/config_flow.py create mode 100644 custom_components/const.py create mode 100644 custom_components/icons.json create mode 100644 custom_components/manifest.json create mode 100644 custom_components/services.yaml create mode 100644 custom_components/strings.json create mode 100644 custom_components/translations/en.json diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ed8ebf5 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +__pycache__ \ No newline at end of file diff --git a/custom_components/README.md b/custom_components/README.md new file mode 100644 index 0000000..167bb75 --- /dev/null +++ b/custom_components/README.md @@ -0,0 +1,2 @@ +# hass_ollama_image_analysis +Image analysis with Ollama (AI models) from within Home Assistant diff --git a/custom_components/__init__.py b/custom_components/__init__.py new file mode 100644 index 0000000..771cc9f --- /dev/null +++ b/custom_components/__init__.py @@ -0,0 +1,91 @@ +"""Ollama (local AI models) image analysis integration""" + +import voluptuous as vol + +from homeassistant.core import ( + Config, + HomeAssistant, + ServiceCall, + SupportsResponse, + ServiceResponse, +) + +from homeassistant.config_entries 
import ConfigEntry + +from ollama import AsyncClient + +import pybase64 +import aiofiles + +DOMAIN = "ollama_image_analysis" + +OLLAMA_IMAGE_ANALYSIS_SERVICE_NAME = "ollama_image_analysis" +OLLAMA_IMAGE_ANALYSIS_SCHEMA = vol.Schema( + { + vol.Required("prompt"): str, + vol.Required("model"): str, + vol.Required("image_path"): str, + } +) + + +async def read_binary_file(file_name): + try: + async with aiofiles.open(file_name, "r") as f: + return await f.read() + except FileNotFoundError: + print(f"The file {file_name} was not found.") + except Exception as e: + print(f"An error occurred: {e}") + + +async def async_setup_entry(hass: HomeAssistant, config: ConfigEntry) -> bool: + """Setup up a config entry.""" + + async def ollama_image_analysis(call: ServiceCall) -> ServiceResponse: + config_dict = config.as_dict() + + # load the call parameters + prompt = call.data["prompt"] + image_path = call.data["image_path"] + model = call.data.get("model", "llava") + + host = config_dict["data"]["host"] + + client = AsyncClient(host=host) + + binary_image = await read_binary_file("/workspaces/hass_core" + image_path) + + # b64encoded_binary_image = await pybase64.b64encode(binary_image) + + # make the call to ollama + response = await client.chat( + model=model, + messages=[ + { + "role": "user", + "content": prompt, + "image": binary_image, + }, + ], + ) + + print("Rückmeldung von Modell: ", response["message"]["content"]) + + return { + "items": [ + { + "result": response["message"]["content"], + } + ], + } + + hass.services.async_register( + DOMAIN, + OLLAMA_IMAGE_ANALYSIS_SERVICE_NAME, + ollama_image_analysis, + schema=OLLAMA_IMAGE_ANALYSIS_SCHEMA, + supports_response=SupportsResponse.ONLY, + ) + + return True diff --git a/custom_components/config_flow.py b/custom_components/config_flow.py new file mode 100644 index 0000000..eb884e3 --- /dev/null +++ b/custom_components/config_flow.py @@ -0,0 +1,74 @@ +"""Config flow for Ollama (local AI models) image analysis 
integration.""" + +from __future__ import annotations + +import logging +from typing import Any +import re + +import voluptuous as vol + +from homeassistant.config_entries import ConfigFlow, ConfigFlowResult +from homeassistant.const import CONF_HOST +from homeassistant.core import HomeAssistant, HomeAssistantError + +from .const import DOMAIN + +_LOGGER = logging.getLogger(__name__) + +STEP_USER_DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str}) + + +async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, Any]: + """Validate the user input allows us to connect. + + Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user. + """ + + # If your PyPI package is not built with async, pass your methods + # to the executor: + # await hass.async_add_executor_job( + # your_validate_func, data[CONF_USERNAME], data[CONF_PASSWORD] + # ) + + p = "(?:http.*://)(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*" + + result = re.match(p, data[CONF_HOST]) + + if not result: + raise InvalidUrlScheme + + # Return info that you want to store in the config entry. 
+ return {"host": data[CONF_HOST]} + + +class ConfigFlow(ConfigFlow, domain=DOMAIN): + """Handle a config flow for Ollama (local AI models) image analysis.""" + + VERSION = 1 + + async def async_step_user( + self, user_input: dict[str, Any] | None = None + ) -> ConfigFlowResult: + """Handle the initial step.""" + errors: dict[str, str] = {} + if user_input is not None: + try: + await validate_input(self.hass, user_input) + except InvalidUrlScheme: + errors["base"] = "invalid_url_scheme" + except Exception: + _LOGGER.exception("Unexpected exception") + errors["base"] = "unknown" + else: + return self.async_create_entry( + title="Ollama (local AI models) image analysis", data=user_input + ) + + return self.async_show_form( + step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors + ) + + +class InvalidUrlScheme(HomeAssistantError): + """Error to indicate we cannot connect.""" diff --git a/custom_components/const.py b/custom_components/const.py new file mode 100644 index 0000000..1063cec --- /dev/null +++ b/custom_components/const.py @@ -0,0 +1,3 @@ +"""Constants for the Ollama (local AI models) image analysis integration.""" + +DOMAIN = "ollama_image_analysis" diff --git a/custom_components/icons.json b/custom_components/icons.json new file mode 100644 index 0000000..55953bc --- /dev/null +++ b/custom_components/icons.json @@ -0,0 +1,5 @@ +{ + "services": { + "ollama_image_analysis": "mdi:image-auto-adjust" + } +} \ No newline at end of file diff --git a/custom_components/manifest.json b/custom_components/manifest.json new file mode 100644 index 0000000..29c2f44 --- /dev/null +++ b/custom_components/manifest.json @@ -0,0 +1,23 @@ +{ + "domain": "ollama_image_analysis", + "name": "Ollama (local AI models) image analysis", + "version": "0.1.0", + "codeowners": [ + "@the-smart-home-maker" + ], + "config_flow": true, + "single_config_entry": true, + "dependencies": [], + "documentation": "https://github.com/the-smart-home-maker/hass_ollama_image_analysis", + 
"issue_tracker": "https://github.com/the-smart-home-maker/hass_ollama_image_analysis/issues", + "homekit": {}, + "iot_class": "local_polling", + "integration_type": "service", + "requirements": [ + "ollama==0.2.1", + "pybase64==1.3.2", + "aiofiles" + ], + "ssdp": [], + "zeroconf": [] +} \ No newline at end of file diff --git a/custom_components/services.yaml b/custom_components/services.yaml new file mode 100644 index 0000000..8506614 --- /dev/null +++ b/custom_components/services.yaml @@ -0,0 +1,25 @@ +ollama_image_analysis: + name: Analyze camera image with local AI based upon Ollama + description: Analyze a camera image with a local AI model (e.g. llava) based upon Ollama + fields: + prompt: + description: The prompt describing how to analyze the image + example: | + You are a security agent and analyze the following camera image. Your focus is to identify irregularities. Please list all irregularities. + required: true + selector: + text: + multiline: true + image_path: + description: This should be the path to a snapshot image taken from your camera which shall be analyzed. + example: /x/y/camera_image.jpg + required: true + selector: + text: + model: + description: Choose which model to use. Defaults to llava. 
+ example: llava + default: llava + required: false + selector: + text: diff --git a/custom_components/strings.json b/custom_components/strings.json new file mode 100644 index 0000000..d6e3212 --- /dev/null +++ b/custom_components/strings.json @@ -0,0 +1,21 @@ +{ + "config": { + "step": { + "user": { + "data": { + "host": "[%key:common::config_flow::data::host%]", + "username": "[%key:common::config_flow::data::username%]", + "password": "[%key:common::config_flow::data::password%]" + } + } + }, + "error": { + "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]", + "invalid_auth": "[%key:common::config_flow::error::invalid_auth%]", + "unknown": "[%key:common::config_flow::error::unknown%]" + }, + "abort": { + "already_configured": "[%key:common::config_flow::abort::already_configured_device%]" + } + } +} diff --git a/custom_components/translations/en.json b/custom_components/translations/en.json new file mode 100644 index 0000000..28d1e9e --- /dev/null +++ b/custom_components/translations/en.json @@ -0,0 +1,18 @@ +{ + "config": { + "abort": { + "already_configured": "Device is already configured" + }, + "error": { + "unknown": "Unexpected error", + "invalid_url_scheme": "Invalid URL scheme" + }, + "step": { + "user": { + "data": { + "host": "Host" + } + } + } + } +} \ No newline at end of file