From 03a73cc16d2434f4579aba18dd86ae6fd8db4ae3 Mon Sep 17 00:00:00 2001
From: Alexander Lyon
Date: Thu, 9 May 2024 03:52:20 +0100
Subject: [PATCH 1/2] Update documentation with detailed explanations

---
 crates/litehouse-config/src/capabilities.rs | 50 ++++++++++++++++++
 crates/litehouse-config/src/lib.rs          | 16 ++----
 crates/litehouse-config/src/parallelism.rs  | 40 +++++++++++++++
 site/content/docs/plugins.mdx               | 42 +++++++++++++--
 site/content/docs/settings.mdx              | 57 +++++++++++++++++++++
 5 files changed, 189 insertions(+), 16 deletions(-)
 create mode 100644 crates/litehouse-config/src/capabilities.rs
 create mode 100644 crates/litehouse-config/src/parallelism.rs

diff --git a/crates/litehouse-config/src/capabilities.rs b/crates/litehouse-config/src/capabilities.rs
new file mode 100644
index 0000000..b540a66
--- /dev/null
+++ b/crates/litehouse-config/src/capabilities.rs
@@ -0,0 +1,50 @@
+//! Capabilities for Litehouse plugins.
+//!
+//! This module defines the capabilities that can be granted to plugins, allowing them to interact with the system and external resources in a controlled manner.
+
+use std::fmt;
+use std::str::FromStr;
+use serde::{Serialize, Deserialize};
+use thiserror::Error;
+
+/// Represents the different capabilities that can be granted to plugins.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub enum Capability {
+    /// Allows the plugin to start an HTTP server on the specified port.
+    #[serde(rename = "http-server")]
+    HttpServer(u16),
+    /// Allows the plugin to make HTTP requests to the specified URL.
+    #[serde(rename = "http-client")]
+    HttpClient(String),
+}
+
+impl fmt::Display for Capability {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Capability::HttpServer(port) => write!(f, "http-server:{}", port),
+            Capability::HttpClient(url) => write!(f, "http-client:{}", url),
+        }
+    }
+}
+
+impl FromStr for Capability {
+    type Err = CapabilityParseError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let parts: Vec<&str> = s.splitn(2, ':').collect();
+        match parts.as_slice() {
+            ["http-server", port] => port.parse().map(Capability::HttpServer).map_err(|_| CapabilityParseError::InvalidPort),
+            ["http-client", url] => Ok(Capability::HttpClient(url.to_string())),
+            _ => Err(CapabilityParseError::InvalidFormat),
+        }
+    }
+}
+
+/// Errors that can occur when parsing a string into a `Capability`.
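+// A few illustrative parses (the inputs are hypothetical examples; the
+// outcomes follow from the `FromStr` impl above and the error variants below):
+//
+//   "http-server:8080"        -> Ok(Capability::HttpServer(8080))
+//   "http-server:not-a-port"  -> Err(CapabilityParseError::InvalidPort)
+//   "udp-server:9000"         -> Err(CapabilityParseError::InvalidFormat)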
+#[derive(Debug, Error)]
+pub enum CapabilityParseError {
+    #[error("invalid capability format")]
+    InvalidFormat,
+    #[error("invalid port number")]
+    InvalidPort,
+}
diff --git a/crates/litehouse-config/src/lib.rs b/crates/litehouse-config/src/lib.rs
index f5da669..04cea63 100644
--- a/crates/litehouse-config/src/lib.rs
+++ b/crates/litehouse-config/src/lib.rs
@@ -7,6 +7,8 @@
 #![feature(let_chains)]
 
 mod hash_read;
+mod parallelism;
+mod capabilities;
 
 use std::{
     cmp::Ordering, collections::HashMap, fmt::Display, num::NonZeroU8, path::Path, str::FromStr,
@@ -18,6 +20,8 @@ use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use thiserror::Error;
 use tokio::io::{AsyncRead, AsyncWrite};
+use parallelism::SandboxStrategy;
+use capabilities::Capability;
 
 const REGISTRY_SEPARATOR: &str = "::";
 const VERSION_SEPARATOR: &str = "@";
@@ -72,18 +76,6 @@ fn is_default(t: &T) -> bool {
     *t == Default::default()
 }
 
-#[derive(JsonSchema, Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
-#[serde(rename_all = "snake_case")]
-pub enum SandboxStrategy {
-    /// All plugins are run in the same storage sandbox
-    Global,
-    /// Each plugin type is run in its own storage sandbox
-    Plugin,
-    /// Each plugin instance is run in its own storage sandbox
-    #[default]
-    Instance,
-}
-
 #[derive(JsonSchema, Serialize, Deserialize, Debug, PartialEq)]
 pub struct MaxBuildCount(NonZeroU8);
 
diff --git a/crates/litehouse-config/src/parallelism.rs b/crates/litehouse-config/src/parallelism.rs
new file mode 100644
index 0000000..26dd401
--- /dev/null
+++ b/crates/litehouse-config/src/parallelism.rs
@@ -0,0 +1,40 @@
+//! Parallelism strategies for Litehouse plugins.
+//!
+//! This module explains the different parallelism strategies available in Litehouse for running plugins.
+//! These strategies determine how plugin instances are sandboxed and executed in parallel.
+
+use serde::{Deserialize, Serialize};
+
+/// Defines the strategy to use for sandboxing plugins.
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+pub enum SandboxStrategy {
+    /// All plugins are run in the same storage sandbox.
+    #[serde(rename = "global")]
+    Global,
+    /// Each plugin type is run in its own storage sandbox.
+    #[serde(rename = "plugin")]
+    Plugin,
+    /// Each plugin instance is run in its own storage sandbox.
+    #[serde(rename = "instance")]
+    Instance,
+}
+
+impl SandboxStrategy {
+    /// Provides a detailed explanation of the sandbox strategy.
+    pub fn description(&self) -> &'static str {
+        match self {
+            SandboxStrategy::Global => "All plugins are run in the same storage sandbox. This strategy is useful for environments with limited resources, as it minimizes the number of sandboxes required. However, it also means that all plugins share the same storage space, which can lead to conflicts and security concerns.",
+            SandboxStrategy::Plugin => "Each plugin type is run in its own storage sandbox. This strategy provides a balance between resource usage and isolation. Plugins of the same type share a sandbox, which can be useful for sharing state between instances of the same plugin.",
+            SandboxStrategy::Instance => "Each plugin instance is run in its own storage sandbox. This strategy provides the highest level of isolation, ensuring that each plugin instance has its own separate storage space. It is the default strategy and recommended for most use cases.",
+        }
+    }
+}
+
+/// Example usage of sandbox strategies.
+///
+/// This function demonstrates how to select and use a sandbox strategy for a hypothetical plugin.
+pub fn example_usage() {
+    let strategy = SandboxStrategy::Instance;
+    println!("Selected sandbox strategy: {:?}", strategy);
+    println!("Description: {}", strategy.description());
+}
diff --git a/site/content/docs/plugins.mdx b/site/content/docs/plugins.mdx
index 92d77e3..a617c1c 100644
--- a/site/content/docs/plugins.mdx
+++ b/site/content/docs/plugins.mdx
@@ -1,9 +1,43 @@
 ---
 title: Plugins
-description: Learn about plugins
+description: Learn about plugins and how to create or host your own
 ---
 
-## Want to publish your own?
+## Introduction
 
-We don't currently have infrastructure in place for authoring and uploading
-plugins. For now, get in touch and we can put it up for you!
+Litehouse plugins are WebAssembly modules that extend the functionality of the Litehouse home automation system. They can be written in any language that compiles to WebAssembly, offering a wide range of possibilities for automation, integration, and customization.
+
+## Hosting Your Own Plugins
+
+Each plugin is a WebAssembly file hosted on S3. To host your own plugins, you can use any S3-compatible storage service. The plugin registry in Litehouse is designed to be decentralized, allowing anyone to host their own registry or contribute to the official Litehouse registry.
+
+## Writing a Plugin
+
+To write a plugin, you need to compile your code to WebAssembly and adhere to the Litehouse plugin specification. The specification is based on the WebAssembly Interface Types (wit) proposal, which allows WebAssembly modules to interoperate with the host environment.
+
+You can learn more about writing plugins and the wit-bindgen tool, which facilitates the creation of WebAssembly modules that conform to the wit specification, by visiting the [wit-bindgen repository](https://github.com/bytecodealliance/wit-bindgen).
+
+## Example Plugin
+
+For an example plugin and more detailed instructions on how to develop a plugin for Litehouse, check out the [Litehouse GitHub repository](https://github.com/arlyon/litehouse/tree/main/crates). Here, you'll find several example plugins and the necessary tools to get started.
+
+## Importing Plugins
+
+To use a plugin in Litehouse, you need to import it in your `settings.json` file. The import syntax allows you to specify the registry and version of the plugin:
+
+```json
+"imports": [
+  "my-registry::plugin@0.1.0"
+]
+```
+
+This syntax enables Litehouse to fetch the plugin from the specified registry and ensure that the correct version is used.
+
+## Plugin and Plugin-Macro Crates
+
+Litehouse provides two Rust crates to help with plugin development:
+
+- `plugin`: This crate includes utilities and macros to facilitate the creation of plugins, including schema generation and serialization.
+- `plugin-macro`: This crate provides procedural macros to generate boilerplate code required for creating Litehouse plugins.
+
+By using these crates, you can streamline the development process and focus on the unique functionality of your plugin.
diff --git a/site/content/docs/settings.mdx b/site/content/docs/settings.mdx
index fb3b7cb..468b588 100644
--- a/site/content/docs/settings.mdx
+++ b/site/content/docs/settings.mdx
@@ -2,3 +2,60 @@
 title: Settings
 description: Learn about the settings.json file
 ---
+
+## JSON Schema
+
+The `settings.json` file is the central configuration file for Litehouse. It defines the plugins to use, their configurations, and other system-wide settings. Below is an overview of the sections you might find in a `settings.json` file.
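+
+Taken together, these sections make up a single JSON document. A minimal sketch (the values are illustrative and simply mirror the per-section examples that follow):
+
+```json
+{
+  "imports": [
+    "litehouse::weather@0.1.0"
+  ],
+  "plugins": {
+    "weather_sensor": {
+      "plugin": "litehouse::weather@0.1.0",
+      "config": {
+        "location": "New York"
+      }
+    }
+  },
+  "capabilities": [
+    "http-client:api.weather.com"
+  ],
+  "engine": {
+    "sandbox_strategy": "instance"
+  }
+}
+```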
+
+### Imports
+
+The `imports` section lists the plugins that your Litehouse instance will use. These can be official plugins from the Litehouse registry or custom plugins you've developed. The syntax for an import is `registry::plugin@version`.
+
+```json
+"imports": [
+  "litehouse::weather@0.1.0",
+  "my-registry::custom-plugin@1.2.3"
+]
+```
+
+### Plugins
+
+The `plugins` section defines instances of the plugins you've imported. Each plugin can be configured individually according to its schema.
+
+```json
+"plugins": {
+  "weather_sensor": {
+    "plugin": "litehouse::weather@0.1.0",
+    "config": {
+      "location": "New York"
+    }
+  }
+}
+```
+
+### Capabilities
+
+The `capabilities` section lists the capabilities that your Litehouse instance and its plugins can use. This controls access to system resources and external services, ensuring a secure environment.
+
+```json
+"capabilities": [
+  "http-server:8080",
+  "http-client:api.weather.com"
+]
+```
+
+### Parallelism Strategies
+
+Litehouse supports different parallelism strategies for running plugins, allowing you to balance between isolation and resource usage. The default strategy is `instance`, where each plugin instance runs in its own sandbox.
+
+```json
+"engine": {
+  "sandbox_strategy": "instance"
+}
+```
+
+Other strategies include `global`, where all plugins share the same sandbox, and `plugin`, where each plugin type has its own sandbox.
+
+## Detailed Configuration
+
+For more detailed information on configuring Litehouse, including advanced engine settings and plugin-specific configurations, refer to the respective sections in the documentation or the JSON schema provided with Litehouse.

From e5814db7b82b01e4894863ad4351afd69ebf0ed3 Mon Sep 17 00:00:00 2001
From: Alexander Lyon
Date: Thu, 9 May 2024 04:20:18 +0100
Subject: [PATCH 2/2] more fixups

---
 Cargo.toml                                  |   6 +
 crates/litehouse-config/src/capabilities.rs |  31 +-
 crates/litehouse-config/src/hash_read.rs    |   4 +-
 crates/litehouse-config/src/import.rs       | 348 +++++++++++++++
 crates/litehouse-config/src/lib.rs          | 457 ++------------------
 crates/litehouse-config/src/parallelism.rs  |  24 +-
 6 files changed, 415 insertions(+), 455 deletions(-)
 create mode 100644 crates/litehouse-config/src/import.rs

diff --git a/Cargo.toml b/Cargo.toml
index 4f8859f..652a045 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,3 +24,9 @@ plugins = [
     "path:crates/tasmota",
     "path:crates/weather",
 ]
+bin = [
+    "path:crates/litehouse",
+    "path:crates/litehouse-config",
+    "path:crates/plugin",
+    "path:crates/plugin-macro",
+]
diff --git a/crates/litehouse-config/src/capabilities.rs b/crates/litehouse-config/src/capabilities.rs
index b540a66..9f9b6a8 100644
--- a/crates/litehouse-config/src/capabilities.rs
+++ b/crates/litehouse-config/src/capabilities.rs
@@ -2,22 +2,40 @@
 //!
 //! This module defines the capabilities that can be granted to plugins, allowing them to interact with the system and external resources in a controlled manner.
 
+use serde::{Deserialize, Serialize};
 use std::fmt;
 use std::str::FromStr;
-use serde::{Serialize, Deserialize};
 use thiserror::Error;
 
 /// Represents the different capabilities that can be granted to plugins.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Capability {
     /// Allows the plugin to start an HTTP server on the specified port.
-    #[serde(rename = "http-server")]
     HttpServer(u16),
     /// Allows the plugin to make HTTP requests to the specified URL.
-    #[serde(rename = "http-client")]
     HttpClient(String),
 }
 
+impl Serialize for Capability {
+    fn serialize<S>(&self, serializer: S) -> std::prelude::v1::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        let string = self.to_string();
+        serializer.serialize_str(&string)
+    }
+}
+
+impl<'de> Deserialize<'de> for Capability {
+    fn deserialize<D>(deserializer: D) -> std::prelude::v1::Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        s.parse().map_err(serde::de::Error::custom)
+    }
+}
+
 impl fmt::Display for Capability {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
@@ -33,7 +51,10 @@ impl FromStr for Capability {
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         let parts: Vec<&str> = s.splitn(2, ':').collect();
         match parts.as_slice() {
-            ["http-server", port] => port.parse().map(Capability::HttpServer).map_err(|_| CapabilityParseError::InvalidPort),
+            ["http-server", port] => port
+                .parse()
+                .map(Capability::HttpServer)
+                .map_err(|_| CapabilityParseError::InvalidPort),
             ["http-client", url] => Ok(Capability::HttpClient(url.to_string())),
             _ => Err(CapabilityParseError::InvalidFormat),
         }
diff --git a/crates/litehouse-config/src/hash_read.rs b/crates/litehouse-config/src/hash_read.rs
index 27ee030..71cbf6d 100644
--- a/crates/litehouse-config/src/hash_read.rs
+++ b/crates/litehouse-config/src/hash_read.rs
@@ -1,8 +1,8 @@
-use pin_project_lite::pin_project;
 use std::task::Poll;
-use tokio::io::AsyncRead;
 
 use digest::Digest;
+use pin_project_lite::pin_project;
+use tokio::io::AsyncRead;
 
 pin_project! {
     pub struct HashRead {
diff --git a/crates/litehouse-config/src/import.rs b/crates/litehouse-config/src/import.rs
new file mode 100644
index 0000000..c1d1a50
--- /dev/null
+++ b/crates/litehouse-config/src/import.rs
@@ -0,0 +1,348 @@
+use std::{cmp::Ordering, fmt::Display, path::Path, str::FromStr};
+
+use miette::Diagnostic;
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+use tokio::io::{AsyncRead, AsyncWrite};
+
+use crate::hash_read::HashRead;
+
+const REGISTRY_SEPARATOR: &str = "::";
+const VERSION_SEPARATOR: &str = "@";
+const SHA_SEPERATOR: &str = "~";
+
+/// A plugin import. Serializes to a string with the format `registry::plugin`
+#[derive(Debug, Clone)]
+pub struct Import {
+    pub registry: Option<String>,
+    pub plugin: String,
+    pub version: Option<semver::Version>,
+    pub sha: Option<Blake3>,
+}
+
+impl Import {
+    pub fn file_name(&self) -> String {
+        let version = self
+            .version
+            .as_ref()
+            .map(|v| format!("{}{}", VERSION_SEPARATOR, v))
+            .unwrap_or_default();
+        format!("{}{}.wasm", self.plugin, version)
+    }
+
+    pub async fn read_sha(&mut self, base_dir: &Path) {
+        use futures::StreamExt;
+
+        // if there is no version, we need to resolve it
+        if self.version.is_none() {
+            let files = tokio::fs::read_dir(base_dir).await.unwrap();
+            let stream = tokio_stream::wrappers::ReadDirStream::new(files);
+            let max_version = stream
+                .filter_map(|entry| {
+                    let import = Import::from_str(
+                        entry
+                            .unwrap()
+                            .file_name()
+                            .to_string_lossy()
+                            .strip_suffix(".wasm")
+                            .unwrap(),
+                    )
+                    .unwrap();
+                    let plugin = &self.plugin;
+                    async move {
+                        if import.plugin.eq(plugin) {
+                            Some(import)
+                        } else {
+                            None
+                        }
+                    }
+                })
+                .collect::<Vec<_>>()
+                .await
+                .into_iter()
+                .max();
+
+            if let Some(import) = max_version {
+                self.version = import.version;
+            } else {
+                return;
+            }
+        }
+
+        let plugin_path = base_dir.join(self.file_name());
+        let hasher = blake3::Hasher::new();
+        let file = tokio::fs::File::open(plugin_path).await.unwrap();
+        let mut hasher = HashRead::new(file, hasher);
+        tokio::io::copy(&mut hasher, &mut tokio::io::empty())
+            .await
+            .unwrap();
+        let output = hasher.finalize();
+        let b: [u8; 32] = output.as_slice().try_into().unwrap();
+        self.sha = Some(Blake3(b));
+    }
+
+    /// Verify that the plugin at this path matches
+    /// this import. This validates the version
+    /// via the file name as well as the sha if
+    /// one is specified.
+    pub async fn verify(&self, path: &Path) -> Option<()> {
+        self.sha.as_ref()?;
+
+        let mut file = tokio::fs::File::open(path).await.unwrap();
+        self.copy(&mut file, &mut tokio::io::empty())
+            .await
+            .map(|_| ())
+    }
+
+    /// Copy the plugin from src to dest, validating the sha in the
+    /// process.
+    pub async fn copy<R: AsyncRead + Unpin, W: AsyncWrite + Unpin>(
+        &self,
+        src: R,
+        dest: &mut W,
+    ) -> Option<u64> {
+        let hasher = blake3::Hasher::new();
+        let mut hasher = HashRead::new(src, hasher);
+        let bytes = tokio::io::copy(&mut hasher, dest).await.unwrap();
+        let output = hasher.finalize();
+
+        if let Some(Blake3(sha)) = self.sha {
+            // maybe consider constant time comparison fn
+            if *output != sha {
+                eprintln!("sha mismatch\n got {:02X?}\n exp {:02X?}", &*output, sha);
+                return None;
+            }
+        }
+
+        Some(bytes)
+    }
+
+    /// Returns how specific `rhs` is relative to `self`.
+    ///
+    /// An import is considered more specific if for each
+    /// field that `self` defines, `rhs` also has that
+    /// field, the values are equal, and that `rhs` also
+    /// specifies fields that `self` does not.
+    ///
+    /// If `self` and `rhs` both specify a field but they
+    /// are not equal, then `None` is returned. If fields
+    /// are set on each side that are not on the other,
+    /// then `None` is returned.
+    pub fn specificity(&self, rhs: &Import) -> Option<Ordering> {
+        // is lhs greater than rhs?
+        let mut left_view = false;
+        // is rhs greater than lhs?
+ let mut right_view = false; + + match (&self.plugin, &rhs.plugin) { + (l, r) if l != r => return None, + _ => {} + }; + + match (&self.registry, &rhs.registry) { + (Some(_), None) => left_view = true, + (None, Some(_)) => right_view = true, + (Some(l), Some(r)) if l != r => return None, + _ => {} + }; + + match (&self.version, &rhs.version) { + (Some(l), Some(r)) if l != r => return None, + (Some(_), None) => left_view = true, + (None, Some(_)) => right_view = true, + _ => {} + }; + + match (&self.sha, &rhs.sha) { + (Some(l), Some(r)) if l != r => return None, + (Some(_), None) => left_view = true, + (None, Some(_)) => right_view = true, + _ => {} + }; + + match (left_view, right_view) { + (true, true) => None, + (true, false) => Some(Ordering::Greater), + (false, true) => Some(Ordering::Less), + (false, false) => Some(Ordering::Equal), + } + } +} + +impl PartialEq for Import { + fn eq(&self, other: &Self) -> bool { + self.plugin == other.plugin && self.version == other.version && self.sha == other.sha + } +} + +impl Eq for Import {} + +impl PartialOrd for Import { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Import { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match self.plugin.cmp(&other.plugin) { + std::cmp::Ordering::Equal => self.version.cmp(&other.version), + other => other, + } + } +} + +impl Serialize for Import { + fn serialize(&self, serializer: S) -> std::prelude::v1::Result + where + S: serde::Serializer, + { + let string = self.to_string(); + serializer.serialize_str(&string) + } +} + +impl<'de> Deserialize<'de> for Import { + fn deserialize(deserializer: D) -> std::prelude::v1::Result + where + D: serde::Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + s.parse().map_err(serde::de::Error::custom) + } +} + +#[derive(Error, Debug, Diagnostic)] +#[error("failed to parse import")] +#[diagnostic( + code(import::invalid_format), + url(docsrs), + help("check the documentation for the correct format") +)] +pub enum ImportParseError { + SemverParseError(#[from] SemverParseError), + Blake3ParseError(#[from] Blake3ParseError), +} + +#[derive(Error, Debug, Diagnostic)] +#[error("failed to parse import")] +#[diagnostic( + code(import::invalid_format), + url(docsrs), + help("check the documentation for the correct format") +)] +pub struct SemverParseError { + #[source_code] + src: String, + + err: semver::Error, + + #[label("{err}")] + err_span: miette::SourceSpan, +} + +#[derive(Error, Debug, Diagnostic)] +#[error("failed to parse import")] +#[diagnostic( + code(import::invalid_format), + url(docsrs), + help("check the documentation for the correct format") +)] +pub struct Blake3ParseError { + #[source_code] + src: String, + + err: blake3::HexError, + + #[label("{err}")] + err_span: miette::SourceSpan, +} + +impl FromStr for Import { + type Err = ImportParseError; + + fn from_str(s: &str) -> Result { + let rest = s.strip_suffix(".wasm").unwrap_or(s); // remove file extension + let (registry, rest) = rest + .split_once(REGISTRY_SEPARATOR) + .map(|(registry, rest)| (Some(registry), rest)) + .unwrap_or((None, rest)); + let (sha, rest) = rest + .rsplit_once(SHA_SEPERATOR) + .map(|(rest, sha)| (Some(sha), rest)) + .unwrap_or((None, rest)); + let (package, version) = rest + .split_once(VERSION_SEPARATOR) + .map(|(package, version)| { + version + .parse() + .map(|v| (package, Some(v))) + .map_err(|e| (e, version)) + }) + .unwrap_or(Ok((rest, None))) + .map_err(|(e, version)| SemverParseError { + err: e, + src: 
s.to_string(), + err_span: s + .find(version) + .map(|i| i..i + version.len()) + .unwrap() + .into(), + })?; + + Ok(Import { + registry: registry.map(str::to_string), + plugin: package.to_string(), + version, + sha: sha + .map(|sha| { + Blake3::from_str(sha).map_err(|e| Blake3ParseError { + err: e, + err_span: s.find(sha).map(|i| i..i + s.len()).unwrap().into(), + src: s.to_string(), + }) + }) + .transpose()?, + }) + } +} + +impl Display for Import { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let registry = self + .registry + .as_deref() + .map(|s| format!("{}{}", s, REGISTRY_SEPARATOR)) + .unwrap_or_default(); + let version = self + .version + .as_ref() + .map(|v| format!("{}{}", VERSION_SEPARATOR, v)) + .unwrap_or_default(); + let sha = self + .sha + .as_ref() + .map(|v| format!("{}{}", SHA_SEPERATOR, v.to_string())) + .unwrap_or_default(); + + write!(f, "{}{}{}{}", registry, self.plugin, version, sha) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct Blake3([u8; blake3::OUT_LEN]); + +impl FromStr for Blake3 { + type Err = blake3::HexError; + fn from_str(s: &str) -> Result { + let hash = s.strip_prefix("blake3:").unwrap(); + Ok(Self(blake3::Hash::from_hex(hash)?.as_bytes().to_owned())) + } +} + +impl Display for Blake3 { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let hash = blake3::Hash::from_bytes(self.0); + write!(f, "blake3:{}", hash.to_hex()) + } +} diff --git a/crates/litehouse-config/src/lib.rs b/crates/litehouse-config/src/lib.rs index 04cea63..4316b7d 100644 --- a/crates/litehouse-config/src/lib.rs +++ b/crates/litehouse-config/src/lib.rs @@ -6,26 +6,20 @@ #![feature(let_chains)] +mod capabilities; mod hash_read; +mod import; mod parallelism; -mod capabilities; -use std::{ - cmp::Ordering, collections::HashMap, fmt::Display, num::NonZeroU8, path::Path, str::FromStr, -}; +use std::{cmp::Ordering, collections::HashMap, num::NonZeroU8}; -use hash_read::HashRead; -use miette::{Diagnostic, NamedSource, SourceOffset}; +use miette::{NamedSource, SourceOffset}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use thiserror::Error; -use tokio::io::{AsyncRead, AsyncWrite}; -use parallelism::SandboxStrategy; -use capabilities::Capability; -const REGISTRY_SEPARATOR: &str = "::"; -const VERSION_SEPARATOR: &str = "@"; -const SHA_SEPERATOR: &str = "~"; +pub use capabilities::Capability; +pub use import::Import; +pub use parallelism::SandboxStrategy; #[derive(JsonSchema, Serialize, Deserialize, Debug, Default)] pub struct LitehouseConfig { @@ -208,76 +202,6 @@ pub struct ParseError { pub err_span: miette::SourceSpan, } -#[derive(Debug, Clone)] -pub enum Capability { - HttpServer(usize), - HttpClient(String), -} - -impl Display for Capability { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Capability::HttpServer(port) => write!(f, "http-server:{}", port), - Capability::HttpClient(url) => write!(f, "http-client:{}", url), - } - } -} - -impl FromStr for Capability { - type Err = CapabilityParseError; - - fn from_str(s: &str) -> Result { - let (name, value) = s - .split_once(':') - .map(|(name, value)| (name, value.to_string())) - .ok_or_else(|| CapabilityParseError::MissingDelimiter)?; - match name { - "http-server" => Ok(value - .parse() - .map(Capability::HttpServer) - .map_err(|_| CapabilityParseError::InvalidPort(value)))?, - "http-client" => Ok(Capability::HttpClient(value)), - variant => Err(CapabilityParseError::UnknownVariant(variant.to_string())), - } - } -} - 
-#[derive(Error, Diagnostic, Debug)] -#[error("invalid capability")] -#[diagnostic( - code(config::invalid_capability), - url(docsrs), - help("check the capability name and value") -)] -pub enum CapabilityParseError { - #[error("unknown variant: {0}")] - UnknownVariant(String), - #[error("missing delimiter")] - MissingDelimiter, - #[error("invalid port: {0}")] - InvalidPort(String), -} - -impl Serialize for Capability { - fn serialize(&self, serializer: S) -> std::prelude::v1::Result - where - S: serde::Serializer, - { - let string = self.to_string(); - serializer.serialize_str(&string) - } -} - -impl<'de> Deserialize<'de> for Capability { - fn deserialize(deserializer: D) -> std::prelude::v1::Result - where - D: serde::Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - s.parse().map_err(serde::de::Error::custom) - } -} - #[derive(JsonSchema, Serialize, Deserialize, Debug)] pub struct Registry { /// The local name of the registry @@ -286,342 +210,6 @@ pub struct Registry { pub url: String, } -/// A plugin import. Serializes to a string with the format `registry::plugin` -#[derive(Debug, Clone)] -pub struct Import { - pub registry: Option, - pub plugin: String, - pub version: Option, - pub sha: Option, -} - -impl Import { - pub fn file_name(&self) -> String { - let version = self - .version - .as_ref() - .map(|v| format!("{}{}", VERSION_SEPARATOR, v)) - .unwrap_or_default(); - format!("{}{}.wasm", self.plugin, version) - } - - pub async fn read_sha(&mut self, base_dir: &Path) { - use futures::StreamExt; - - // if there is no version, we need to resolve it - if self.version.is_none() { - let files = tokio::fs::read_dir(base_dir).await.unwrap(); - let stream = tokio_stream::wrappers::ReadDirStream::new(files); - let max_version = stream - .filter_map(|entry| { - let import = Import::from_str( - entry - .unwrap() - .file_name() - .to_string_lossy() - .strip_suffix(".wasm") - .unwrap(), - ) - .unwrap(); - let plugin = &self.plugin; - async move { - if import.plugin.eq(plugin) { - Some(import) - } else { - None - } - } - }) - .collect::>() - .await - .into_iter() - .max(); - - if let Some(import) = max_version { - self.version = import.version; - } else { - return; - } - } - - let plugin_path = base_dir.join(self.file_name()); - let hasher = blake3::Hasher::new(); - let file = tokio::fs::File::open(plugin_path).await.unwrap(); - let mut hasher = HashRead::new(file, hasher); - tokio::io::copy(&mut hasher, &mut tokio::io::empty()) - .await - .unwrap(); - let output = hasher.finalize(); - let b: [u8; 32] = output.as_slice().try_into().unwrap(); - self.sha = Some(Blake3(b)); - } - - /// Verify that the plugin at this path matches - /// this import. This validates the version - /// via the file name as well as the sha if - /// one is specified. - pub async fn verify(&self, path: &Path) -> Option<()> { - self.sha.as_ref()?; - - let mut file = tokio::fs::File::open(path).await.unwrap(); - self.copy(&mut file, &mut tokio::io::empty()) - .await - .map(|_| ()) - } - - /// Copy the plugin from src to dest, validating the sha in the - /// process. 
- pub async fn copy( - &self, - src: R, - dest: &mut W, - ) -> Option { - let hasher = blake3::Hasher::new(); - let mut hasher = HashRead::new(src, hasher); - let bytes = tokio::io::copy(&mut hasher, dest).await.unwrap(); - let output = hasher.finalize(); - - if let Some(Blake3(sha)) = self.sha { - // maybe consider constant time comparison fn - if *output != sha { - eprintln!("sha mismatch\n got {:02X?}\n exp {:02X?}", &*output, sha); - return None; - } - } - - Some(bytes) - } - - /// Returns how specific `rhs` is relative to `self`. - /// - /// An import is considered more specific if for each - /// field that `self` is defined, `rhs` also has that - /// field, the values are equal, and that `rhs` also - /// specifies fields that `self` does not. - /// - /// If `self` and `rhs` both specify a field but they - /// are not equal, then `None` is returned. If fields - /// are set on each size that are not on the other, - /// then `None` is returned. - pub fn specificity(&self, rhs: &Import) -> Option { - // is lhs greater than rhs? - let mut left_view = false; - // is rhs greater than lhs? - let mut right_view = false; - - match (&self.plugin, &rhs.plugin) { - (l, r) if l != r => return None, - _ => {} - }; - - match (&self.registry, &rhs.registry) { - (Some(_), None) => left_view = true, - (None, Some(_)) => right_view = true, - (Some(l), Some(r)) if l != r => return None, - _ => {} - }; - - match (&self.version, &rhs.version) { - (Some(l), Some(r)) if l != r => return None, - (Some(_), None) => left_view = true, - (None, Some(_)) => right_view = true, - _ => {} - }; - - match (&self.sha, &rhs.sha) { - (Some(l), Some(r)) if l != r => return None, - (Some(_), None) => left_view = true, - (None, Some(_)) => right_view = true, - _ => {} - }; - - match (left_view, right_view) { - (true, true) => None, - (true, false) => Some(Ordering::Greater), - (false, true) => Some(Ordering::Less), - (false, false) => Some(Ordering::Equal), - } - } -} - -impl PartialEq for Import { - fn eq(&self, other: &Self) -> bool { - self.plugin == other.plugin && self.version == other.version && self.sha == other.sha - } -} - -impl Eq for Import {} - -impl PartialOrd for Import { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Import { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - match self.plugin.cmp(&other.plugin) { - std::cmp::Ordering::Equal => self.version.cmp(&other.version), - other => other, - } - } -} - -impl Serialize for Import { - fn serialize(&self, serializer: S) -> std::prelude::v1::Result - where - S: serde::Serializer, - { - let string = self.to_string(); - serializer.serialize_str(&string) - } -} - -impl<'de> Deserialize<'de> for Import { - fn deserialize(deserializer: D) -> std::prelude::v1::Result - where - D: serde::Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - s.parse().map_err(serde::de::Error::custom) - } -} - -#[derive(Error, Debug, Diagnostic)] -#[error("failed to parse import")] -#[diagnostic( - code(import::invalid_format), - url(docsrs), - help("check the documentation for the correct format") -)] -pub enum ImportParseError { - SemverParseError(#[from] SemverParseError), - Blake3ParseError(#[from] Blake3ParseError), -} - -#[derive(Error, Debug, Diagnostic)] -#[error("failed to parse import")] -#[diagnostic( - code(import::invalid_format), - url(docsrs), - help("check the documentation for the correct format") -)] -pub struct SemverParseError { - #[source_code] - src: String, - - err: semver::Error, - - 
#[label("{err}")] - err_span: miette::SourceSpan, -} - -#[derive(Error, Debug, Diagnostic)] -#[error("failed to parse import")] -#[diagnostic( - code(import::invalid_format), - url(docsrs), - help("check the documentation for the correct format") -)] -pub struct Blake3ParseError { - #[source_code] - src: String, - - err: blake3::HexError, - - #[label("{err}")] - err_span: miette::SourceSpan, -} - -impl FromStr for Import { - type Err = ImportParseError; - - fn from_str(s: &str) -> Result { - let rest = s.strip_suffix(".wasm").unwrap_or(s); // remove file extension - let (registry, rest) = rest - .split_once(REGISTRY_SEPARATOR) - .map(|(registry, rest)| (Some(registry), rest)) - .unwrap_or((None, rest)); - let (sha, rest) = rest - .rsplit_once(SHA_SEPERATOR) - .map(|(rest, sha)| (Some(sha), rest)) - .unwrap_or((None, rest)); - let (package, version) = rest - .split_once(VERSION_SEPARATOR) - .map(|(package, version)| { - version - .parse() - .map(|v| (package, Some(v))) - .map_err(|e| (e, version)) - }) - .unwrap_or(Ok((rest, None))) - .map_err(|(e, version)| SemverParseError { - err: e, - src: s.to_string(), - err_span: s - .find(version) - .map(|i| i..i + version.len()) - .unwrap() - .into(), - })?; - - Ok(Import { - registry: registry.map(str::to_string), - plugin: package.to_string(), - version, - sha: sha - .map(|sha| { - Blake3::from_str(sha).map_err(|e| Blake3ParseError { - err: e, - err_span: s.find(sha).map(|i| i..i + s.len()).unwrap().into(), - src: s.to_string(), - }) - }) - .transpose()?, - }) - } -} - -impl Display for Import { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let registry = self - .registry - .as_deref() - .map(|s| format!("{}{}", s, REGISTRY_SEPARATOR)) - .unwrap_or_default(); - let version = self - .version - .as_ref() - .map(|v| format!("{}{}", VERSION_SEPARATOR, v)) - .unwrap_or_default(); - let sha = self - .sha - .as_ref() - .map(|v| format!("{}{}", SHA_SEPERATOR, v.to_string())) - .unwrap_or_default(); - - write!(f, "{}{}{}{}", registry, self.plugin, version, sha) - } -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct Blake3([u8; blake3::OUT_LEN]); - -impl FromStr for Blake3 { - type Err = blake3::HexError; - fn from_str(s: &str) -> Result { - let hash = s.strip_prefix("blake3:").unwrap(); - Ok(Self(blake3::Hash::from_hex(hash)?.as_bytes().to_owned())) - } -} - -impl Display for Blake3 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let hash = blake3::Hash::from_bytes(self.0); - write!(f, "blake3:{}", hash.to_hex()) - } -} - #[derive(JsonSchema, Serialize, Deserialize, Debug)] pub struct PluginInstance { #[schemars(with = "String")] @@ -631,9 +219,12 @@ pub struct PluginInstance { #[cfg(test)] mod test { - use super::*; + use std::str::FromStr; + use test_case::test_case; + use super::*; + #[test_case("package" ; "just package")] #[test_case("registry::package" ; "registry")] #[test_case("registry::package@1.0.0" ; "version")] @@ -647,17 +238,29 @@ mod test { assert_eq!(package.plugin, "package"); } - #[test_case(&[], "registry::package" => ImportAddResult::Added ; "basic case")] - #[test_case(&["registry::package"], "registry::package" => ImportAddResult::Ignored; "duplicate")] - #[test_case(&["registry::package"], "registry::package@1.0.0" => ImportAddResult::Replaced ; "add more specific version should overwrite")] - #[test_case(&["registry::package@1.0.0"], "registry::package" => ImportAddResult::Ignored ; "add less specific version should be ignored")] - #[test_case(&["package@1.0.1"], 
"package@1.0.2" => ImportAddResult::Added ; "incompatible imports should be added")] - fn add_import(list: &[&str], import: &str) -> ImportAddResult { + #[test_case(&[], "registry::package" => matches ImportAddResult::Added(_) ; "basic case")] + #[test_case(&["registry::package"], "registry::package" => matches ImportAddResult::Ignored(_); "duplicate")] + #[test_case(&["registry::package"], "registry::package@1.0.0" => matches ImportAddResult::Replaced(_) ; "add more specific version should overwrite")] + #[test_case(&["registry::package@1.0.0"], "registry::package" => matches ImportAddResult::Ignored(_) ; "add less specific version should be ignored")] + #[test_case(&["package@1.0.1"], "package@1.0.2" => matches ImportAddResult::Added(_) ; "incompatible imports should be added")] + fn add_import(list: &[&str], import: &str) -> ImportAddResult<'static> { let mut config = LitehouseConfig { imports: list.iter().map(|s| s.parse().unwrap()).collect(), ..Default::default() }; - config.add_import(import.parse().unwrap()) + + // just leak... + match config.add_import(import.parse().unwrap()) { + ImportAddResult::Added(x) => { + let x = Box::leak(Box::new(x.to_owned())); + ImportAddResult::Added(x) + } + ImportAddResult::Ignored(x) => { + let x = Box::leak(Box::new(x.to_owned())); + ImportAddResult::Ignored(x) + } + ImportAddResult::Replaced(x) => ImportAddResult::Replaced(x), + } } #[test_case("package", "package" => Some(Ordering::Equal) ; "same package")] diff --git a/crates/litehouse-config/src/parallelism.rs b/crates/litehouse-config/src/parallelism.rs index 26dd401..f9ed34c 100644 --- a/crates/litehouse-config/src/parallelism.rs +++ b/crates/litehouse-config/src/parallelism.rs @@ -3,10 +3,11 @@ //! This module explains the different parallelism strategies available in Litehouse for running plugins. //! These strategies determine how plugin instances are sandboxed and executed in parallel. +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; /// Defines the strategy to use for sandboxing plugins. -#[derive(Debug, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Default, JsonSchema)] pub enum SandboxStrategy { /// All plugins are run in the same storage sandbox. #[serde(rename = "global")] @@ -16,25 +17,6 @@ pub enum SandboxStrategy { Plugin, /// Each plugin instance is run in its own storage sandbox. #[serde(rename = "instance")] + #[default] Instance, } - -impl SandboxStrategy { - /// Provides a detailed explanation of the sandbox strategy. - pub fn description(&self) -> &'static str { - match self { - SandboxStrategy::Global => "All plugins are run in the same storage sandbox. This strategy is useful for environments with limited resources, as it minimizes the number of sandboxes required. However, it also means that all plugins share the same storage space, which can lead to conflicts and security concerns.", - SandboxStrategy::Plugin => "Each plugin type is run in its own storage sandbox. This strategy provides a balance between resource usage and isolation. Plugins of the same type share a sandbox, which can be useful for sharing state between instances of the same plugin.", - SandboxStrategy::Instance => "Each plugin instance is run in its own storage sandbox. This strategy provides the highest level of isolation, ensuring that each plugin instance has its own separate storage space. It is the default strategy and recommended for most use cases.", - } - } -} - -/// Example usage of sandbox strategies. 
-///
-/// This function demonstrates how to select and use a sandbox strategy for a hypothetical plugin.
-pub fn example_usage() {
-    let strategy = SandboxStrategy::Instance;
-    println!("Selected sandbox strategy: {:?}", strategy);
-    println!("Description: {}", strategy.description());
-}