diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cbecedb..4031864 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,6 +59,8 @@ jobs: steps: - name: checkout code uses: actions/checkout@v4 + with: + submodules: "recursive" - name: install rust uses: dtolnay/rust-toolchain@nightly diff --git a/Cargo.lock b/Cargo.lock index 17d3a58..26e1f80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 4 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] -name = "adler2" -version = "2.0.0" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" @@ -66,17 +66,17 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", + "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", - "windows-targets", ] [[package]] @@ -175,6 +175,33 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "color-eyre" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55146f5e46f237f7423d74111267d4597b59b0dad0ffaf7303bce9945d843ad5" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + 
"owo-colors", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" +dependencies = [ + "once_cell", + "owo-colors", + "tracing-core", + "tracing-error", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -249,6 +276,8 @@ version = "0.1.0" dependencies = [ "async-trait", "bollard", + "cfg-if", + "color-eyre", "dockworker", "futures", "futures-util", @@ -302,6 +331,16 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fastrand" version = "2.3.0" @@ -452,9 +491,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "h2" @@ -804,6 +843,12 @@ dependencies = [ "icu_properties", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "1.9.3" @@ -848,6 +893,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + [[package]] name = "libc" version = "0.2.169" @@ -907,11 +958,11 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" -version = 
"0.8.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ - "adler2", + "adler", ] [[package]] @@ -968,9 +1019,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -1025,6 +1076,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + [[package]] name = "parking_lot" version = "0.12.3" @@ -1425,6 +1482,15 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shell-words" version = "1.1.0" @@ -1621,6 +1687,16 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + [[package]] name = "time" version = "0.3.37" @@ -1780,6 +1856,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-error" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" +dependencies = [ + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "sharded-slab", + "thread_local", + "tracing-core", ] [[package]] @@ -1838,6 +1936,12 @@ dependencies = [ "getrandom", ] +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + [[package]] name = "vcpkg" version = "0.2.15" diff --git a/Cargo.toml b/Cargo.toml index b1c7899..48447c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,33 +3,16 @@ name = "dockworker" version = "0.1.0" edition = "2021" -[features] -default = [] -docker = [ - "async-trait", - "bollard", - "futures", - "futures-util", - "ipnet", - "reqwest", - "tar", - "tempfile", - "tokio", - "tracing", - "sysinfo", - "uuid", - "walkdir", -] - [dependencies] # Core dependencies (always included) +cfg-if = "1.0.0" regex = "1.11.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9" shell-words = "1.1" thiserror = "2.0" -log = "0.4.25" +log = "0.4" # Optional dependencies (only with deploy feature) bollard = { version = "0.17", optional = true } @@ -46,9 +29,30 @@ futures = { version = "0.3.31", optional = true } walkdir = { version = "2.5.0", optional = true } tracing = { version = "0.1.41", optional = true } +# Used in tests +color-eyre = { version = "0.6.3", optional = true } + [dev-dependencies] -dockworker = { path = ".", features = ["docker"] } +dockworker = { path = ".", features = ["docker", "testing"] } tokio = { version = "1.0", features = ["full"] } pretty_assertions = "1.4.1" reqwest = "0.12.12" +[features] +default = [] +docker = [ + "async-trait", + "bollard", + 
"futures", + "futures-util", + "ipnet", + "reqwest", + "tar", + "tempfile", + "tokio", + "tracing", + "sysinfo", + "uuid", + "walkdir", +] +testing = ["dep:color-eyre"] \ No newline at end of file diff --git a/src/builder/compose.rs b/src/builder/compose.rs index 405b3d1..8c61b16 100644 --- a/src/builder/compose.rs +++ b/src/builder/compose.rs @@ -2,10 +2,9 @@ use crate::{ config::{ compose::{ComposeConfig, Service}, health::HealthCheck, - volume::VolumeType, + volume::Volume, }, error::DockerError, - parser::compose::ComposeParser, DockerBuilder, }; use bollard::container::{Config, CreateContainerOptions, StartContainerOptions}; @@ -16,116 +15,10 @@ use std::collections::HashMap; use std::path::{Path, PathBuf}; use tar; use tempfile; -use tokio::fs; use uuid::Uuid; use walkdir; impl DockerBuilder { - /// Creates a new Docker Compose configuration from a file with environment variables - /// - /// This method reads both the compose file and an environment file, then - /// performs variable substitution before parsing. 
- /// - /// # Arguments - /// - /// * `path` - Path to the Docker Compose file - /// * `env_path` - Path to the environment file - /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `ComposeConfig` or a `DockerError` - /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::path::Path; - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; - /// let config = builder - /// .from_compose_with_env(Path::new("docker-compose.yml"), Path::new(".env")) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn from_compose_with_env>( - &self, - path: P, - env_path: P, - ) -> Result { - ComposeParser::from_file_with_env(path, env_path).await - } - - /// Creates a new Docker Compose configuration from a file with environment variables provided as a HashMap - /// - /// This method is useful when you want to provide environment variables programmatically rather than from a file. - /// - /// # Arguments - /// - /// * `path` - Path to the Docker Compose file - /// * `env_vars` - HashMap containing environment variable key-value pairs - /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `ComposeConfig` or a `DockerError` - /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::path::Path; - /// # use std::collections::HashMap; - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; - /// let mut env_vars = HashMap::new(); - /// env_vars.insert("VERSION".to_string(), "1.0".to_string()); - /// let config = builder - /// .from_compose_with_env_map(Path::new("docker-compose.yml"), &env_vars) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn from_compose_with_env_map>( - &self, - path: P, - env_vars: &HashMap, - ) -> Result { - ComposeParser::from_file_with_env_map(path, env_vars).await - } - - /// Creates a new Docker Compose configuration from a file - 
/// - /// This is a simple wrapper around `ComposeParser::parse()` that reads the file contents first. - /// - /// # Arguments - /// - /// * `path` - Path to the Docker Compose file - /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `ComposeConfig` or a `DockerError` - /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::path::Path; - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; - /// let config = builder - /// .from_compose(Path::new("docker-compose.yml")) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn from_compose>( - &self, - path: P, - ) -> Result { - let content = fs::read_to_string(path).await?; - ComposeParser::parse(&content) - } - /// Deploys a Docker Compose configuration with a custom base directory /// /// This method deploys services defined in a Docker Compose configuration, using the specified @@ -152,13 +45,12 @@ impl DockerBuilder { /// # use dockworker::DockerBuilder; /// # use dockworker::parser::ComposeParser; /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; - /// let mut config = ComposeParser::from_file(Path::new("docker-compose.yml")).await?; - /// let container_ids = builder - /// .deploy_compose_with_base_dir(&mut config, std::env::current_dir()?) 
- /// .await?; - /// # Ok(()) - /// # } + /// let compose_path = "docker-compose.yml"; + /// + /// let builder = DockerBuilder::new().await?; + /// let mut config = ComposeParser::new().parse_from_path(compose_path)?; + /// let container_ids = builder.deploy_compose(&mut config).await?; + /// # Ok(()) } /// ``` /// /// # Errors @@ -197,21 +89,22 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # use dockworker::parser::ComposeParser; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; - /// let mut config = ComposeParser::parse( - /// r#" + /// use dockworker::parser::ComposeParser; + /// use dockworker::DockerBuilder; + /// + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::DockerError> { + /// let compose_file = r#" /// version: "3" /// services: /// web: /// image: nginx - /// "#, - /// )?; + /// "#; + /// + /// let builder = DockerBuilder::new().await?; + /// let mut config = ComposeParser::new().parse(&mut compose_file.as_bytes())?; /// let container_ids = builder.deploy_compose(&mut config).await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` /// /// # Errors @@ -246,7 +139,7 @@ impl DockerBuilder { // Create volumes defined in the compose file for (volume_name, volume_type) in &config.volumes { - if let VolumeType::Named(_) = volume_type { + if let Volume::Named(_) = volume_type { self.client .create_volume(bollard::volume::CreateVolumeOptions { name: volume_name.to_string(), @@ -309,7 +202,7 @@ impl DockerBuilder { // Configure mounts if volumes are specified if let Some(volumes) = &service.volumes { - let mounts: Vec = volumes.iter().map(|vol| vol.to_mount()).collect(); + let mounts: Vec = volumes.iter().cloned().map(Mount::from).collect(); host_config.mounts = Some(mounts); } @@ -554,8 +447,9 @@ impl DockerBuilder { /// # Examples /// /// ```rust - /// # use dockworker::{DockerBuilder, config::compose::Service}; - /// # use 
std::collections::HashMap; + /// use dockworker::{config::compose::Service, DockerBuilder}; + /// use std::collections::HashMap; + /// /// # fn example() { /// let mut service = Service::default(); /// let mut env = HashMap::new(); @@ -600,7 +494,7 @@ impl DockerBuilder { for service in config.services.values_mut() { if let Some(volumes) = &mut service.volumes { for volume in volumes.iter_mut() { - if let VolumeType::Bind { source, .. } = volume { + if let Volume::Bind { source, .. } = volume { let absolute_path = Self::normalize_path(&base, source)?; *source = absolute_path.to_string_lossy().into_owned(); } @@ -633,22 +527,3 @@ impl DockerBuilder { } } } - -// Helper function to parse memory strings like "1G", "512M" into bytes -pub fn parse_memory_string(memory: &str) -> Result { - let len = memory.len(); - let (num, unit) = memory.split_at(len - 1); - let base = num.parse::().map_err(|_| { - DockerError::InvalidResourceLimit(format!("Invalid memory value: {}", memory)) - })?; - - match unit.to_uppercase().as_str() { - "K" => Ok(base * 1024), - "M" => Ok(base * 1024 * 1024), - "G" => Ok(base * 1024 * 1024 * 1024), - _ => Err(DockerError::InvalidResourceLimit(format!( - "Invalid memory unit: {}", - unit - ))), - } -} diff --git a/src/builder/docker_file.rs b/src/builder/docker_file.rs index d3b1040..6f6ad15 100644 --- a/src/builder/docker_file.rs +++ b/src/builder/docker_file.rs @@ -1,52 +1,13 @@ -use crate::{ - config::docker_file::DockerfileConfig, error::DockerError, - parser::docker_file::DockerfileParser, -}; +use crate::config::docker_file::DockerfileConfig; +use crate::error::DockerError; use bollard::container::{Config, CreateContainerOptions, StartContainerOptions}; use bollard::image::BuildImageOptions; use bollard::service::HostConfig; use futures_util::StreamExt; -use std::path::Path; -use tokio::fs; use super::DockerBuilder; impl DockerBuilder { - /// Creates a new Dockerfile configuration from a file - /// - /// This method reads a Dockerfile and 
parses it into a structured configuration. - /// It handles basic Dockerfile syntax including: - /// - Line continuations with backslash - /// - Comments starting with # - /// - Basic Dockerfile commands like FROM, COPY, etc. - /// - /// # Arguments - /// - /// * `path` - Path to the Dockerfile - /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `DockerfileConfig` or a `DockerError` - /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::path::Path; - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; - /// let config = builder.from_dockerfile(Path::new("Dockerfile")).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn from_dockerfile>( - &self, - path: P, - ) -> Result { - let content = fs::read_to_string(path).await?; - DockerfileParser::parse(&content) - } - /// Deploys a Dockerfile configuration with optional settings /// /// This method builds a Docker image from a Dockerfile configuration and creates a container from it. 
@@ -71,26 +32,36 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::{DockerBuilder, config::docker_file::{DockerCommand, DockerfileConfig}}; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; - /// let config = DockerfileConfig { // Your Dockerfile config + /// use dockworker::config::docker_file::{DockerCommand, DockerfileConfig}; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; + /// let config = DockerfileConfig { + /// // Your Dockerfile config /// base_image: "ubuntu:latest".to_string(), /// commands: vec![ - /// DockerCommand::Run { command: "apt-get update".to_string() }, - /// DockerCommand::Copy { source: "app".to_string(), dest: "/app".to_string(), chown: None }, - /// ] + /// DockerCommand::Run { + /// command: "apt-get update".to_string(), + /// }, + /// DockerCommand::Copy { + /// source: "app".to_string(), + /// dest: "/app".to_string(), + /// chown: None, + /// }, + /// ], /// }; - /// let container_id = builder.deploy_dockerfile( - /// &config, - /// "my-image:latest", - /// Some(vec!["echo".to_string(), "hello".to_string()]), - /// None, - /// None, - /// None - /// ).await?; - /// # Ok(()) - /// # } + /// let container_id = builder + /// .deploy_dockerfile( + /// &config, + /// "my-image:latest", + /// Some(vec!["echo".to_string(), "hello".to_string()]), + /// None, + /// None, + /// None, + /// ) + /// .await?; + /// # Ok(()) } /// ``` pub async fn deploy_dockerfile( &self, @@ -106,7 +77,7 @@ impl DockerBuilder { let dockerfile_path = temp_dir.path().join("Dockerfile"); // Write the Dockerfile content from our config - tokio::fs::write(&dockerfile_path, config.to_dockerfile_content()).await?; + tokio::fs::write(&dockerfile_path, config.to_string()).await?; // Create tar archive with the Dockerfile let tar_path = temp_dir.path().join("context.tar"); diff --git 
a/src/builder/management.rs b/src/builder/management.rs index c761168..7384e24 100644 --- a/src/builder/management.rs +++ b/src/builder/management.rs @@ -19,10 +19,11 @@ impl DockerBuilder { /// /// # Examples /// ```no_run - /// # use std::time::Duration; - /// # use std::collections::HashMap; - /// # use dockworker::{DockerBuilder, DockerError}; - /// # async fn example(builder: DockerBuilder) -> Result<(), DockerError> { + /// use dockworker::DockerBuilder; + /// use std::collections::HashMap; + /// use std::time::Duration; + /// + /// # async fn example(builder: DockerBuilder) -> Result<(), dockworker::DockerError> { /// // Create a network with retries /// let mut labels = HashMap::new(); /// labels.insert("env".to_string(), "prod".to_string()); @@ -30,8 +31,7 @@ impl DockerBuilder { /// builder /// .create_network_with_retry("my-network", 3, Duration::from_secs(1), Some(labels)) /// .await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn create_network_with_retry( &self, @@ -86,12 +86,12 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// builder.remove_network("my-network").await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn remove_network(&self, name: &str) -> Result<(), DockerError> { self.get_client() @@ -117,9 +117,10 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// /// // Pull with default platform /// 
builder.pull_image("ubuntu:latest", None).await?; @@ -128,14 +129,13 @@ impl DockerBuilder { /// builder /// .pull_image("ubuntu:latest", Some("linux/arm64")) /// .await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn pull_image(&self, image: &str, platform: Option<&str>) -> Result<(), DockerError> { let mut pull_stream = self.client.create_image( Some(bollard::image::CreateImageOptions { from_image: image, - platform: platform.unwrap_or("linux/amd64"), + platform: platform.unwrap_or(""), ..Default::default() }), None, @@ -163,15 +163,15 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// let networks = builder.list_networks().await?; /// for network in networks { /// println!("Found network: {}", network); /// } - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn list_networks(&self) -> Result, DockerError> { let networks = self @@ -198,12 +198,12 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// builder.create_volume("my_volume").await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn create_volume(&self, name: &str) -> Result<(), DockerError> { self.get_client() @@ -233,12 +233,12 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # 
async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// builder.remove_volume("my_volume").await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn remove_volume(&self, name: &str) -> Result<(), DockerError> { self.get_client() @@ -259,15 +259,15 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// let volumes = builder.list_volumes().await?; /// for volume in volumes { /// println!("Found volume: {}", volume); /// } - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn list_volumes(&self) -> Result, DockerError> { let volumes = self @@ -301,12 +301,12 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// builder.wait_for_container("container_id").await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn wait_for_container(&self, container_id: &str) -> Result<(), DockerError> { let mut retries = 5; @@ -350,13 +350,13 @@ impl DockerBuilder { /// # Examples /// /// ```rust,no_run - /// # use dockworker::DockerBuilder; - /// # async fn example() -> Result<(), Box> { - /// let builder = DockerBuilder::new()?; + /// use dockworker::DockerBuilder; + /// + /// # async fn example() -> Result<(), dockworker::DockerError> { + /// let builder = DockerBuilder::new().await?; /// let logs = builder.get_container_logs("container_id").await?; /// println!("Container logs: {}", logs); - /// # 
Ok(()) - /// # } + /// # Ok(()) } /// ``` pub async fn get_container_logs(&self, container_id: &str) -> Result { let mut output = String::new(); diff --git a/src/config/compose.rs b/src/config/compose.rs index 8b8f3d4..a057fa2 100644 --- a/src/config/compose.rs +++ b/src/config/compose.rs @@ -1,4 +1,4 @@ -use super::volume::VolumeType; +use super::volume::Volume; use super::EnvironmentVars; use crate::config::health::HealthCheck; use crate::config::requirements::SystemRequirements; @@ -6,7 +6,6 @@ use crate::error::DockerError; use regex::Regex; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; -use std::path::{Path, PathBuf}; /// Configuration for a single service in a Docker Compose file #[derive(Default, Debug, Clone, Serialize, Deserialize)] @@ -18,7 +17,7 @@ pub struct Service { pub command: Option>, pub environment: Option, pub env_file: Option>, - pub volumes: Option>, + pub volumes: Option>, pub depends_on: Option>, pub ports: Option>, pub networks: Option>, @@ -73,7 +72,7 @@ pub struct ComposeConfig { pub services: HashMap, /// Map of volume name to volume configuration #[serde(default)] - pub volumes: HashMap, + pub volumes: HashMap, } impl Default for ComposeConfig { @@ -199,7 +198,7 @@ impl ComposeConfig { for service in self.services.values() { if let Some(volumes) = &service.volumes { for volume in volumes { - if let VolumeType::Named(name) = volume { + if let Volume::Named(name) = volume { let volume_name = name.split(':').next().unwrap_or(name).to_string(); if !self.volumes.contains_key(&volume_name) { used_volumes.insert(volume_name, volume.clone()); @@ -216,16 +215,16 @@ impl ComposeConfig { /// Resolves environment variables in the configuration pub fn resolve_env(&mut self, env_vars: &HashMap) { // Helper function to resolve env vars in a volume - fn resolve_volume(volume: &mut VolumeType, env_vars: &HashMap) { + fn resolve_volume(volume: &mut Volume, env_vars: &HashMap) { match volume { - VolumeType::Named(name) => 
{ + Volume::Named(name) => { *name = ComposeConfig::resolve_env_value(name, env_vars); } - VolumeType::Bind { source, target, .. } => { + Volume::Bind { source, target, .. } => { *source = ComposeConfig::resolve_env_value(source, env_vars); *target = ComposeConfig::resolve_env_value(target, env_vars); } - VolumeType::Config { + Volume::Config { name, driver, driver_opts, @@ -295,154 +294,4 @@ impl ComposeConfig { } result } - - /// Loads and processes all environment variables from: - /// - System environment (lowest priority) - /// - Environment files in order: - /// 1. Common env files (e.g., envs/common/*.env) - /// 2. Network-specific env files (e.g., envs/op-mainnet/*.env) - /// 3. Service-specific env files (from env_file directive) - /// - Service-specific environment variables (highest priority) - pub fn process_environment( - &mut self, - base_dir: &Path, - ) -> Result, DockerError> { - // Start with system environment variables - let mut env_vars = std::env::vars().collect::>(); - println!("Loaded system environment variables"); - - // First pass: load common env files - let common_env_dir = base_dir.join("envs").join("common"); - if common_env_dir.exists() { - let entries = std::fs::read_dir(&common_env_dir).map_err(|e| { - DockerError::ValidationError(format!("Failed to read common env directory: {}", e)) - })?; - for entry in entries { - let entry = entry.map_err(|e| { - DockerError::ValidationError(format!("Failed to read directory entry: {}", e)) - })?; - if entry.path().extension().and_then(|s| s.to_str()) == Some("env") { - println!("Loading common env file: {}", entry.path().display()); - match std::fs::read_to_string(entry.path()) { - Ok(content) => { - let file_vars = - crate::parser::compose::ComposeParser::parse_env_file(&content)?; - for (key, value) in file_vars { - env_vars.entry(key).or_insert(value); - } - } - Err(e) => { - println!( - "Warning: Failed to read env file {}: {}", - entry.path().display(), - e - ); - } - } - } - } - } - - // 
Second pass: resolve and load service env_files - for service in self.services.values() { - if let Some(env_files) = &service.env_file { - for env_file in env_files { - // Resolve variables in the env file path - let resolved_path = Self::resolve_env_value(env_file, &env_vars); - - // Make path absolute - let env_path = if Path::new(&resolved_path).is_absolute() { - PathBuf::from(resolved_path) - } else { - base_dir.join(resolved_path) - }; - - println!("Loading service env file: {}", env_path.display()); - - // Read and parse env file if it exists - match std::fs::read_to_string(&env_path) { - Ok(content) => { - let file_vars = - crate::parser::compose::ComposeParser::parse_env_file(&content)?; - env_vars.extend(file_vars); - } - Err(e) => { - println!( - "Warning: Failed to read env file {}: {}", - env_path.display(), - e - ); - // Only fail if the file is required (no default value in the path) - if env_file.contains("${") && !env_file.contains(":-") { - return Err(DockerError::ValidationError(format!( - "Required env file {} not found: {}", - env_path.display(), - e - ))); - } - } - } - } - } - } - - // Final pass: add service-specific environment variables - for service in self.services.values() { - if let Some(service_env) = &service.environment { - // Service environment variables take highest precedence - env_vars.extend(service_env.iter().map(|(k, v)| (k.clone(), v.clone()))); - } - } - - // Validate that all required variables are present - self.validate_environment(&env_vars)?; - - // Resolve all variables in the config using the complete env_vars - self.resolve_env(&env_vars); - - Ok(env_vars) - } - - /// Validates that all required environment variables are present - fn validate_environment(&self, env_vars: &HashMap) -> Result<(), DockerError> { - for (service_name, service) in &self.services { - // Check env_file paths can be resolved - if let Some(env_files) = &service.env_file { - for env_file in env_files { - if env_file.contains("${") && 
!env_file.contains(":-") { - let var_name = env_file - .split("${") - .nth(1) - .and_then(|s| s.split("}").next()) - .ok_or_else(|| { - DockerError::ValidationError(format!( - "Invalid environment variable syntax in env_file path: {}", - env_file - )) - })?; - if !env_vars.contains_key(var_name) { - return Err(DockerError::ValidationError(format!( - "Service '{}' is missing required environment variable '{}' for env_file path", - service_name, var_name - ))); - } - } - } - } - - // Check service environment variables - if let Some(env) = &service.environment { - for (key, value) in env.iter() { - if value.contains("${") && !value.contains(":-") && !env_vars.contains_key(key) - { - return Err(DockerError::ValidationError(format!( - "Service '{}' is missing required environment variable: {}", - service_name, key - ))); - } - } - } - } - Ok(()) - } } diff --git a/src/config/docker_file.rs b/src/config/docker_file.rs index b5eac74..64afeb1 100644 --- a/src/config/docker_file.rs +++ b/src/config/docker_file.rs @@ -1,6 +1,8 @@ +use crate::DockerError; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fmt::Display; +use std::path::Path; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DockerfileConfig { @@ -8,6 +10,65 @@ pub struct DockerfileConfig { pub commands: Vec, } +impl DockerfileConfig { + /// Parse a Dockerfile + /// + /// This handles basic Dockerfile syntax including: + /// - Line continuations with backslash + /// - Comments starting with # + /// - Basic Dockerfile commands like FROM, COPY, etc. + /// + /// # Example + /// + /// ```rust,no_run + /// use dockworker::config::DockerfileConfig; + /// + /// let content = r#" + /// FROM ubuntu:latest + /// COPY . 
/app + /// RUN cargo build + /// "#; + /// + /// let config = DockerfileConfig::parse(content).unwrap(); + /// ``` + /// + /// # Errors + /// + /// Returns [`DockerError::DockerfileError`] if: + /// - Command syntax is invalid + /// - Required arguments are missing + /// - Command is not recognized + pub fn parse>(content: S) -> Result { + crate::parser::docker_file::parse(content.as_ref()) + } + + /// Parse a Dockerfile configuration from a file + /// + /// See [`DockerfileConfig::parse`]. + /// + /// # Examples + /// + /// ```rust,no_run + /// use dockworker::config::DockerfileConfig; + /// + /// let config = DockerfileConfig::parse_from_path("Dockerfile").unwrap(); + /// ``` + pub fn parse_from_path>(path: P) -> Result { + let content = std::fs::read_to_string(path.as_ref())?; + Self::parse(content) + } +} + +impl Display for DockerfileConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "FROM {}", self.base_image)?; + for command in &self.commands { + writeln!(f, "{}", command)?; + } + Ok(()) + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(tag = "type")] pub enum DockerCommand { @@ -76,18 +137,6 @@ pub enum DockerCommand { }, } -impl DockerfileConfig { - pub fn to_dockerfile_content(&self) -> String { - let mut content = format!("FROM {}\n", self.base_image); - - for command in &self.commands { - content.push_str(&format!("{}\n", command)); - } - - content - } -} - impl Display for DockerCommand { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let str = match self { diff --git a/src/config/health.rs b/src/config/health.rs index 6cc18a2..474298c 100644 --- a/src/config/health.rs +++ b/src/config/health.rs @@ -1,5 +1,5 @@ -use crate::error::DockerError; use serde::{Deserialize, Serialize}; +use std::fmt::Display; use std::time::Duration; #[cfg(feature = "docker")] @@ -8,10 +8,34 @@ use reqwest::Client; #[cfg(feature = "docker")] use tokio::time::sleep; +#[cfg(feature = 
"docker")] +#[derive(Debug, thiserror::Error)] +pub enum HealthCheckError { + #[error("Health check failed: {0}")] + Request(#[from] reqwest::Error), + #[error("Health check failed: expected status {expected}, got {actual}")] + UnexpectedStatus { expected: u16, actual: u16 }, +} + +#[derive(Debug, Copy, Clone, Serialize, Deserialize)] +pub enum Method { + Get, + Post, +} + +impl Display for Method { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Method::Get => write!(f, "GET"), + Method::Post => write!(f, "POST"), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct HealthCheck { pub endpoint: String, - pub method: String, + pub method: Method, pub expected_status: u16, pub body: Option, #[serde(with = "duration_serde")] @@ -44,7 +68,7 @@ pub(crate) mod duration_serde { #[cfg(feature = "docker")] impl HealthCheck { - pub async fn check(&self) -> Result<(), DockerError> { + pub async fn check(&self) -> Result<(), HealthCheckError> { let client = Client::new(); let mut attempts = 0; @@ -64,15 +88,10 @@ impl HealthCheck { Ok(()) } - async fn perform_check(&self, client: &Client) -> Result<(), DockerError> { - let mut request = match self.method.to_uppercase().as_str() { - "GET" => client.get(&self.endpoint), - "POST" => client.post(&self.endpoint), - _ => { - return Err(DockerError::ValidationError( - "Unsupported HTTP method".into(), - )); - } + async fn perform_check(&self, client: &Client) -> Result<(), HealthCheckError> { + let mut request = match self.method { + Method::Get => client.get(&self.endpoint), + Method::Post => client.post(&self.endpoint), }; if let Some(body) = &self.body { @@ -83,14 +102,14 @@ impl HealthCheck { .timeout(self.timeout) .send() .await - .map_err(|e| DockerError::ValidationError(format!("Health check failed: {}", e)))?; - - if response.status().as_u16() != self.expected_status { - return Err(DockerError::ValidationError(format!( - "Health check failed: expected status {}, got 
{}", - self.expected_status, - response.status() - ))); + .map_err(HealthCheckError::Request)?; + + let status = response.status().as_u16(); + if status != self.expected_status { + return Err(HealthCheckError::UnexpectedStatus { + expected: self.expected_status, + actual: status, + }); } Ok(()) @@ -106,7 +125,7 @@ mod tests { async fn test_health_check_success() { let health_check = HealthCheck { endpoint: "https://httpbin.org/status/200".to_string(), - method: "GET".to_string(), + method: Method::Get, expected_status: 200, body: None, interval: Duration::from_secs(1), @@ -121,7 +140,7 @@ mod tests { async fn test_health_check_failure() { let health_check = HealthCheck { endpoint: "https://httpbin.org/status/500".to_string(), - method: "GET".to_string(), + method: Method::Get, expected_status: 200, body: None, interval: Duration::from_secs(1), diff --git a/src/config/mod.rs b/src/config/mod.rs index 0dbb785..799c46e 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -6,6 +6,7 @@ pub mod requirements; pub mod volume; pub use compose::*; +pub use docker_file::*; pub use env_vars::*; pub use health::*; pub use requirements::*; diff --git a/src/config/requirements.rs b/src/config/requirements.rs index cd646bf..3c573a5 100644 --- a/src/config/requirements.rs +++ b/src/config/requirements.rs @@ -125,3 +125,33 @@ pub fn parse_memory_string(memory: &str) -> Result { ))), } } + +#[cfg(test)] +mod tests { + use super::parse_memory_string; + use crate::error::DockerError; + + #[test] + fn test_memory_string_parsing() { + assert_eq!(parse_memory_string("512M").unwrap(), 512 * 1024 * 1024); + assert_eq!(parse_memory_string("1G").unwrap(), 1024 * 1024 * 1024); + assert!(parse_memory_string("invalid").is_err()); + } + + #[test] + fn test_invalid_resource_limits() { + let memory_tests = vec![ + ("1X", "Invalid memory unit: X"), + ("abc", "Invalid memory value: abc"), + ("12.5G", "Invalid memory value: 12.5G"), + ]; + + for (input, expected_error) in memory_tests { + let 
result = parse_memory_string(input); + assert!(matches!( + result, + Err(DockerError::InvalidResourceLimit(msg)) if msg.contains(expected_error) + )); + } + } +} diff --git a/src/config/volume.rs b/src/config/volume.rs index 8363ed2..a0e8c26 100644 --- a/src/config/volume.rs +++ b/src/config/volume.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; #[derive(Debug, Clone, PartialEq)] -pub enum VolumeType { +pub enum Volume { Named(String), Bind { source: String, @@ -22,14 +22,14 @@ pub enum VolumeType { #[derive(Debug, Deserialize)] #[serde(untagged)] enum VolumeSpec { - Empty(()), + Empty, Full { driver: Option, driver_opts: Option>, }, } -impl<'de> Deserialize<'de> for VolumeType { +impl<'de> Deserialize<'de> for Volume { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, @@ -56,27 +56,27 @@ impl<'de> Deserialize<'de> for VolumeType { match parts.len() { 2 => { if parts[0].starts_with('/') || parts[0].starts_with("./") { - Ok(VolumeType::Bind { + Ok(Volume::Bind { source: parts[0].to_string(), target: parts[1].to_string(), read_only: false, }) } else { - Ok(VolumeType::Named(s.to_string())) + Ok(Volume::Named(s.to_string())) } } 3 if parts[2] == "ro" => { if parts[0].starts_with('/') || parts[0].starts_with("./") { - Ok(VolumeType::Bind { + Ok(Volume::Bind { source: parts[0].to_string(), target: parts[1].to_string(), read_only: true, }) } else { - Ok(VolumeType::Named(s.to_string())) + Ok(Volume::Named(s.to_string())) } } - _ => Ok(VolumeType::Named(s.to_string())), + _ => Ok(Volume::Named(s.to_string())), } } VolumeInput::Long { @@ -85,7 +85,7 @@ impl<'de> Deserialize<'de> for VolumeType { typ, read_only, } => match typ.as_deref() { - Some("bind") => Ok(VolumeType::Bind { + Some("bind") => Ok(Volume::Bind { source, target, read_only, @@ -96,7 +96,7 @@ impl<'de> Deserialize<'de> for VolumeType { } else { format!("{}:{}", source, target) }; - Ok(VolumeType::Named(name)) + Ok(Volume::Named(name)) } 
Some(t) => Err(serde::de::Error::custom(format!( "Invalid volume type: {}", @@ -104,7 +104,7 @@ impl<'de> Deserialize<'de> for VolumeType { ))), }, VolumeInput::TopLevel(spec) => match spec { - VolumeSpec::Empty(_) => Ok(VolumeType::Config { + VolumeSpec::Empty => Ok(Volume::Config { name: String::new(), driver: None, driver_opts: None, @@ -112,7 +112,7 @@ impl<'de> Deserialize<'de> for VolumeType { VolumeSpec::Full { driver, driver_opts, - } => Ok(VolumeType::Config { + } => Ok(Volume::Config { name: String::new(), driver, driver_opts, @@ -122,14 +122,14 @@ impl<'de> Deserialize<'de> for VolumeType { } } -impl Serialize for VolumeType { +impl Serialize for Volume { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, { match self { - VolumeType::Named(name) => serializer.serialize_str(name), - VolumeType::Bind { + Volume::Named(name) => serializer.serialize_str(name), + Volume::Bind { source, target, read_only, @@ -140,7 +140,7 @@ impl Serialize for VolumeType { serializer.serialize_str(&format!("{}:{}", source, target)) } } - VolumeType::Config { + Volume::Config { driver, driver_opts, .. @@ -170,11 +170,11 @@ impl Serialize for VolumeType { } } -impl VolumeType { - #[cfg(feature = "docker")] - pub fn to_mount(&self) -> Mount { - match self { - VolumeType::Named(name) => { +#[cfg(feature = "docker")] +impl From for Mount { + fn from(volume_type: Volume) -> Self { + match volume_type { + Volume::Named(name) => { let parts: Vec<&str> = name.split(':').collect(); Mount { target: Some(parts[1].to_string()), @@ -183,30 +183,32 @@ impl VolumeType { ..Default::default() } } - VolumeType::Bind { + Volume::Bind { source, target, read_only, } => Mount { - target: Some(target.to_string()), - source: Some(source.to_string()), + target: Some(target), + source: Some(source), typ: Some(MountTypeEnum::BIND), - read_only: Some(*read_only), + read_only: Some(read_only), ..Default::default() }, - VolumeType::Config { name, .. 
} => Mount { - source: Some(name.clone()), + Volume::Config { name, .. } => Mount { + source: Some(name), typ: Some(MountTypeEnum::VOLUME), ..Default::default() }, } } +} +impl Volume { pub fn matches_name(&self, name: &str) -> bool { match self { - VolumeType::Named(volume_name) => volume_name.split(':').next().unwrap_or("") == name, - VolumeType::Bind { target, .. } => target == name, - VolumeType::Config { + Volume::Named(volume_name) => volume_name.split(':').next().unwrap_or("") == name, + Volume::Bind { target, .. } => target == name, + Volume::Config { name: volume_name, .. } => volume_name == name, } diff --git a/src/container.rs b/src/container.rs index 5c6edff..a1c91bc 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,8 +1,5 @@ //! Utilities for spinning up and managing Docker containers -//! -//! This module provides wrappers around [`bollard`] to simplify Docker interactions within blueprints. -pub use bollard; use bollard::container::{ Config, CreateContainerOptions, InspectContainerOptions, ListContainersOptions, StartContainerOptions, StopContainerOptions, WaitContainerOptions, @@ -97,15 +94,14 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// // We can now start our container - /// container.start(true).await?; - /// Ok(()) - /// } + /// // We can now start our container + /// container.start(true).await?; + /// # Ok(()) } /// ``` pub fn new(client: &'a Docker, image: T) -> Self where @@ -132,21 +128,20 @@ impl<'a> Container<'a> { 
/// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// // We can now start our container and grab its id - /// container.start(false).await?; + /// // We can now start our container and grab its id + /// container.start(false).await?; /// - /// let id = container.id().unwrap(); + /// let id = container.id().unwrap(); /// - /// let container2 = Container::from_id(&connection, id).await?; + /// let container2 = Container::from_id(&connection, id).await?; /// - /// assert_eq!(container.id(), container2.id()); - /// Ok(()) - /// } + /// assert_eq!(container.id(), container2.id()); + /// # Ok(()) } /// ``` pub async fn from_id(client: &'a Docker, id: T) -> Result where @@ -223,17 +218,16 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// container.env(["FOO=BAR", "BAZ=QUX"]); + /// container.env(["FOO=BAR", "BAZ=QUX"]); /// - /// // We can now start our container, and the "FOO" and "BAZ" env vars will be set - /// container.start(true).await?; - /// Ok(()) - /// } + /// // We can now start 
our container, and the "FOO" and "BAZ" env vars will be set + /// container.start(true).await?; + /// # Ok(()) } /// ``` pub fn env(&mut self, env: impl IntoIterator>) -> &mut Self { self.options.env = Some(env.into_iter().map(Into::into).collect()); @@ -252,17 +246,16 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// container.cmd(["echo", "Hello!"]); + /// container.cmd(["echo", "Hello!"]); /// - /// // We can now start our container, and the command "echo Hello!" will run - /// container.start(true).await?; - /// Ok(()) - /// } + /// // We can now start our container, and the command "echo Hello!" 
will run + /// container.start(true).await?; + /// # Ok(()) } /// ``` pub fn cmd(&mut self, cmd: impl IntoIterator>) -> &mut Self { self.options.cmd = Some(cmd.into_iter().map(Into::into).collect()); @@ -280,18 +273,17 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// // Mount './my-host-dir' at '/some/container/dir' and make it read-only - /// container.binds(["./my-host-dir:/some/container/dir:ro"]); + /// // Mount './my-host-dir' at '/some/container/dir' and make it read-only + /// container.binds(["./my-host-dir:/some/container/dir:ro"]); /// - /// // We can now start our container - /// container.start(true).await?; - /// Ok(()) - /// } + /// // We can now start our container + /// container.start(true).await?; + /// # Ok(()) } /// ``` pub fn binds(&mut self, binds: impl IntoIterator>) -> &mut Self { self.options.binds = Some(binds.into_iter().map(Into::into).collect()); @@ -322,22 +314,21 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// 
container.env(["FOO=BAR", "BAZ=QUX"]); - /// container.cmd(["echo", "Hello!"]); - /// container.binds(["./host-data:/container-data"]); + /// container.env(["FOO=BAR", "BAZ=QUX"]); + /// container.cmd(["echo", "Hello!"]); + /// container.binds(["./host-data:/container-data"]); /// - /// // The container is created using the above settings - /// container.create().await?; + /// // The container is created using the above settings + /// container.create().await?; /// - /// // Now it can be started - /// container.start(true).await?; - /// Ok(()) - /// } + /// // Now it can be started + /// container.start(true).await?; + /// # Ok(()) } /// ``` #[tracing::instrument] pub async fn create(&mut self) -> Result<(), bollard::errors::Error> { @@ -379,22 +370,21 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// container.cmd(["echo", "Hello!"]); + /// container.cmd(["echo", "Hello!"]); /// - /// // We can now start our container, and the command "echo Hello!" will run. - /// let wait_for_exit = true; - /// container.start(wait_for_exit).await?; + /// // We can now start our container, and the command "echo Hello!" will run. + /// let wait_for_exit = true; + /// container.start(wait_for_exit).await?; /// - /// // Since we waited for the container to exit, we don't have to stop it. - /// // It can now just be removed. - /// container.remove(None).await?; - /// Ok(()) - /// } + /// // Since we waited for the container to exit, we don't have to stop it. 
+ /// // It can now just be removed. + /// container.remove(None).await?; + /// # Ok(()) } /// ``` #[tracing::instrument] pub async fn start(&mut self, wait_for_exit: bool) -> Result<(), bollard::errors::Error> { @@ -427,29 +417,27 @@ impl<'a> Container<'a> { /// use std::time::Duration; /// use tokio::time; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); - /// - /// container.cmd(["echo", "Hello!"]); + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// let wait_for_exit = false; - /// container.start(wait_for_exit).await?; + /// container.cmd(["echo", "Hello!"]); /// - /// loop { - /// let status = container.status().await?.unwrap(); - /// if status.is_active() { - /// time::sleep(Duration::from_secs(5)).await; - /// continue; - /// } + /// let wait_for_exit = false; + /// container.start(wait_for_exit).await?; /// - /// println!("Container exited!"); - /// break; + /// loop { + /// let status = container.status().await?.unwrap(); + /// if status.is_active() { + /// time::sleep(Duration::from_secs(5)).await; + /// continue; /// } /// - /// Ok(()) + /// println!("Container exited!"); + /// break; /// } + /// # Ok(()) } /// ``` pub async fn status(&self) -> Result, Error> { if self.id.is_none() { @@ -484,20 +472,19 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; /// - /// let 
mut container = Container::new(&connection, "rustlang/rust"); + /// let mut container = Container::new(&connection, "rustlang/rust"); /// - /// // Does nothing, the container isn't started - /// container.stop().await?; + /// // Does nothing, the container isn't started + /// container.stop().await?; /// - /// // Stops the running container - /// container.start(false).await?; - /// container.stop().await?; - /// Ok(()) - /// } + /// // Stops the running container + /// container.start(false).await?; + /// container.stop().await?; + /// # Ok(()) } /// ``` #[tracing::instrument] pub async fn stop(&mut self) -> Result<(), bollard::errors::Error> { @@ -526,24 +513,23 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; /// - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// // Start our container - /// container.start(false).await?; + /// // Start our container + /// container.start(false).await?; /// - /// let remove_container_options = bollard::container::RemoveContainerOptions { - /// force: true, - /// ..Default::default() - /// }; + /// let remove_container_options = bollard::container::RemoveContainerOptions { + /// force: true, + /// ..Default::default() + /// }; /// - /// // Kills the container and removes it - /// container.remove(Some(remove_container_options)).await?; - /// Ok(()) - /// } + /// // Kills the container and removes it + /// container.remove(Some(remove_container_options)).await?; + /// # Ok(()) } /// ``` /// /// [`RemoveContainerOptions::force`]: 
bollard::container::RemoveContainerOptions::force @@ -572,19 +558,18 @@ impl<'a> Container<'a> { /// use dockworker::container::Container; /// use dockworker::DockerBuilder; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; /// - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); /// - /// // Start our container - /// container.start(false).await?; + /// // Start our container + /// container.start(false).await?; /// - /// // Once this returns, we know that the container has exited. - /// container.wait().await?; - /// Ok(()) - /// } + /// // Once this returns, we know that the container has exited. + /// container.wait().await?; + /// # Ok(()) } /// ``` #[tracing::instrument] pub async fn wait(&self) -> Result<(), bollard::errors::Error> { @@ -614,35 +599,34 @@ impl<'a> Container<'a> { /// use dockworker::DockerBuilder; /// use futures::StreamExt; /// - /// #[tokio::main] - /// async fn main() -> Result<(), dockworker::container::Error> { - /// let connection = DockerBuilder::new()?; - /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); - /// - /// // Start our container and wait for it to exit - /// container.start(true).await?; - /// - /// // We want to collect logs from stderr - /// let logs_options = bollard::container::LogsOptions { - /// stderr: true, - /// follow: true, - /// ..Default::default() - /// }; - /// - /// // Get our log stream - /// let mut logs = container - /// .logs(Some(logs_options)) - /// .await - /// .expect("logs should be present"); - /// - /// // Now we want to print anything from stderr - /// while let Some(Ok(out)) = logs.next().await { - /// if let 
bollard::container::LogOutput::StdErr { message } = out { - /// eprintln!("Uh oh! Something was written to stderr: {:?}", message); - /// } + /// # #[tokio::main] + /// # async fn main() -> Result<(), dockworker::container::Error> { + /// let connection = DockerBuilder::new().await?; + /// let mut container = Container::new(connection.get_client(), "rustlang/rust"); + /// + /// // Start our container and wait for it to exit + /// container.start(true).await?; + /// + /// // We want to collect logs from stderr + /// let logs_options = bollard::container::LogsOptions { + /// stderr: true, + /// follow: true, + /// ..Default::default() + /// }; + /// + /// // Get our log stream + /// let mut logs = container + /// .logs(Some(logs_options)) + /// .await + /// .expect("logs should be present"); + /// + /// // Now we want to print anything from stderr + /// while let Some(Ok(out)) = logs.next().await { + /// if let bollard::container::LogOutput::StdErr { message } = out { + /// eprintln!("Uh oh! 
Something was written to stderr: {:?}", message); /// } - /// Ok(()) /// } + /// # Ok(()) } /// ``` #[tracing::instrument] pub async fn logs( diff --git a/src/lib.rs b/src/lib.rs index 015e48f..690d817 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,7 @@ pub use config::{ compose::{BuildConfig, ComposeConfig, Service}, - volume::VolumeType, + volume::Volume, }; pub use error::DockerError; @@ -11,7 +11,7 @@ pub mod error; pub mod parser; #[cfg(test)] -pub mod tests; +mod test_fixtures; #[cfg(feature = "docker")] pub mod builder; @@ -19,3 +19,5 @@ pub mod builder; pub use builder::DockerBuilder; #[cfg(feature = "docker")] pub mod container; +#[cfg(feature = "docker")] +pub use bollard; diff --git a/src/parser/compose.rs b/src/parser/compose.rs index 7834e0a..c94014e 100644 --- a/src/parser/compose.rs +++ b/src/parser/compose.rs @@ -1,7 +1,12 @@ +#[cfg(test)] +mod tests; + +use super::env; use crate::{config::compose::ComposeConfig, error::DockerError}; -use regex::Regex; use std::collections::HashMap; -use std::path::Path; +use std::fs::File; +use std::io::Read; +use std::path::{Path, PathBuf}; /// Parser for Docker Compose configuration files with environment variable support /// @@ -15,270 +20,169 @@ use std::path::Path; /// /// ```rust,no_run /// use dockworker::parser::ComposeParser; -/// use std::path::Path; /// -/// # async fn example() -> Result<(), Box> { +/// # #[tokio::main] +/// # async fn main() -> Result<(), Box> { /// // Parse a compose file with environment variables from a .env file -/// let config = -/// ComposeParser::from_file_with_env(Path::new("docker-compose.yml"), Path::new(".env")) -/// .await?; +/// let compose_path = "docker-compose.yml"; +/// let env_path = ".env"; +/// +/// let config = ComposeParser::new() +/// .env_file(env_path) +/// .parse_from_path(compose_path)?; /// /// // Parse a compose file with explicit environment variables /// let mut env_vars = std::collections::HashMap::new(); /// env_vars.insert("VERSION".to_string(), 
"1.0".to_string()); -/// let config = -/// ComposeParser::from_file_with_env_map(Path::new("docker-compose.yml"), &env_vars).await?; -/// # Ok(()) -/// # } +/// +/// let config = ComposeParser::new() +/// .env_vars(env_vars) +/// .parse_from_path(compose_path)?; +/// # Ok(()) } /// ``` -pub struct ComposeParser; +#[derive(Default, Clone)] +pub struct ComposeParser { + env_file_path: Option, + env_vars: Option>, +} impl ComposeParser { /// Parses a Docker Compose file from the given path /// - /// This is the simplest way to parse a compose file when no environment - /// variable substitution is needed. - /// /// # Arguments /// /// * `path` - Path to the Docker Compose file /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `ComposeConfig` or a `DockerError` - /// /// # Examples /// /// ```rust,no_run - /// # use std::path::Path; - /// # use dockworker::parser::ComposeParser; - /// # async fn example() -> Result<(), Box> { - /// let config = ComposeParser::from_file(Path::new("docker-compose.yml")).await?; + /// use dockworker::parser::ComposeParser; + /// + /// # fn main() -> Result<(), Box> { + /// let config = ComposeParser::new().parse_from_path("docker-compose.yml")?; /// # Ok(()) /// # } /// ``` - pub async fn from_file>(path: P) -> Result { - let content = std::fs::read_to_string(path.as_ref()).map_err(DockerError::FileError)?; - Self::parse(&content) + pub fn parse_from_path>(self, path: P) -> Result { + let mut f = File::open(path)?; + self.parse(&mut f) } - /// Parses a Docker Compose file with environment variables from an env file - /// - /// This method reads both the compose file and an environment file, then - /// performs variable substitution before parsing. 
- /// - /// # Arguments - /// - /// * `compose_path` - Path to the Docker Compose file - /// * `env_path` - Path to the environment file - /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `ComposeConfig` or a `DockerError` - /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::path::Path; - /// # use dockworker::parser::ComposeParser; - /// # async fn example() -> Result<(), Box> { - /// let config = - /// ComposeParser::from_file_with_env(Path::new("docker-compose.yml"), Path::new(".env")) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn from_file_with_env>( - compose_path: P, - env_path: P, - ) -> Result { - let content = - std::fs::read_to_string(compose_path.as_ref()).map_err(DockerError::FileError)?; - let env_content = - std::fs::read_to_string(env_path.as_ref()).map_err(DockerError::FileError)?; + pub fn parse(self, reader: &mut R) -> Result + where + R: Read, + { + let mut env_vars = self.env_vars.unwrap_or_default(); - let env_vars = Self::parse_env_file(&env_content)?; - let processed_content = Self::substitute_env_vars(&content, &env_vars)?; + if let Some(env_file_path) = self.env_file_path { + let env_content = std::fs::read_to_string(env_file_path).map_err(|e| { + DockerError::ValidationError(format!("Failed to read env file: {}", e)) + })?; - let config = Self::parse(&processed_content)?; + env_vars.extend(env::parse_env_file(&env_content)?); + } - // Validate environment variables - Self::validate_required_env_vars(&config, &env_vars)?; + let mut config_bytes = Vec::new(); + reader.read_to_end(&mut config_bytes)?; - Ok(config) - } + let compose = String::from_utf8(config_bytes).map_err(|e| { + DockerError::ValidationError(format!("Failed to read compose file: {}", e)) + })?; + let processed_content = env::substitute_env_vars(&compose, &env_vars)?; - /// Parses a Docker Compose file with environment variables from a HashMap - /// - /// This method is useful when you want to provide environment 
variables - /// programmatically rather than from a file. - /// - /// # Arguments - /// - /// * `compose_path` - Path to the Docker Compose file - /// * `env_vars` - HashMap containing environment variable key-value pairs - /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `ComposeConfig` or a `DockerError` - /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::path::Path; - /// # use std::collections::HashMap; - /// # use dockworker::parser::ComposeParser; - /// # async fn example() -> Result<(), Box> { - /// let mut env_vars = HashMap::new(); - /// env_vars.insert("VERSION".to_string(), "1.0".to_string()); - /// let config = - /// ComposeParser::from_file_with_env_map(Path::new("docker-compose.yml"), &env_vars).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn from_file_with_env_map>( - compose_path: P, - env_vars: &HashMap, - ) -> Result { - let content = - std::fs::read_to_string(compose_path.as_ref()).map_err(DockerError::FileError)?; - let processed_content = Self::substitute_env_vars(&content, env_vars)?; - - let config = Self::parse(&processed_content)?; + let config: ComposeConfig = + serde_yaml::from_str(&processed_content).map_err(DockerError::YamlError)?; // Validate environment variables - Self::validate_required_env_vars(&config, env_vars)?; + validate_required_env_vars(&config, &env_vars)?; Ok(config) } +} - /// Parses a Docker Compose configuration from a string - /// - /// This is the core parsing method that other methods build upon. - /// It handles the basic YAML parsing and normalization. 
- /// - /// # Arguments - /// - /// * `content` - The string containing the Docker Compose YAML configuration - /// - /// # Returns - /// - /// Returns a `Result` containing the parsed `ComposeConfig` or a `DockerError` if parsing fails - /// - /// # Examples - /// - /// ```rust - /// use dockworker::parser::ComposeParser; - /// - /// let content = r#" - /// version: "3" - /// services: - /// web: - /// image: nginx - /// ports: - /// - "80:80" - /// "#; - /// - /// let config = ComposeParser::parse(content).unwrap(); - /// ``` - /// - /// # Errors - /// - /// Returns `DockerError::YamlError` if the YAML parsing fails - pub fn parse(content: &str) -> Result { - let config: ComposeConfig = - serde_yaml::from_str(content).map_err(DockerError::YamlError)?; - Ok(config) +impl ComposeParser { + pub fn new() -> Self { + Self { + env_file_path: None, + env_vars: None, + } } - /// Parses an environment file into a HashMap of key-value pairs - /// - /// This method parses environment variable definitions from a file into a HashMap. - /// It handles various formats and performs validation of variable names. 
+ /// Set an env file for environment variable substitution /// /// # Arguments /// - /// * `content` - The string content of the environment file to parse - /// - /// # Returns - /// - /// Returns a `Result` containing a `HashMap` of environment variables, - /// or a `DockerError` if parsing fails + /// * `path` - Path to the environment variables file /// - /// # Format + /// # Examples /// - /// The following formats are supported: - /// - Comments (lines starting with #) - /// - Empty lines (ignored) - /// - KEY=value format - /// - Quoted values (quotes are stripped) + /// ``` + /// use dockworker::parser::compose::ComposeParser; + /// use std::fs::write; + /// use std::path::Path; + /// use tempfile::NamedTempFile; + /// # use dockworker::error::DockerError; /// - /// Variable names must: - /// - Start with a letter or underscore - /// - Contain only alphanumeric characters and underscores + /// # fn main() -> Result<(), DockerError> { + /// let compose_content = r#"version: "3" + /// services: + /// app1: + /// image: "nginx:${VERSION:-latest}" + /// environment: + /// PORT: "${PORT}" + /// DEBUG: "true" + /// app2: + /// image: "nginx:${VERSION:-latest}" + /// environment: + /// - PORT=${PORT} + /// - DEBUG=true"#; /// - /// # Examples + /// let env_file = NamedTempFile::new()?; + /// write(env_file.path(), b"VERSION=1.21\nPORT=8080")?; /// - /// ``` - /// use dockworker::parser::ComposeParser; + /// let config = ComposeParser::new() + /// .env_file(env_file.path()) + /// .parse(&mut compose_content.as_bytes())?; /// - /// let content = r#" - /// # Database settings - /// DB_HOST=localhost - /// DB_PORT=5432 - /// DB_NAME="myapp" - /// "#; + /// // Test map syntax service + /// let app1 = config.services.get("app1").unwrap(); + /// assert_eq!(app1.image.as_deref(), Some("nginx:1.21")); + /// if let Some(env) = &app1.environment { + /// assert_eq!(env.get("PORT").map(String::as_str), Some("8080")); + /// assert_eq!(env.get("DEBUG").map(String::as_str), 
Some("true")); + /// } /// - /// let env_vars = ComposeParser::parse_env_file(content).unwrap(); - /// assert_eq!(env_vars.get("DB_HOST"), Some(&"localhost".to_string())); + /// // Test list syntax service + /// let app2 = config.services.get("app2").unwrap(); + /// assert_eq!(app2.image.as_deref(), Some("nginx:1.21")); + /// if let Some(env) = &app2.environment { + /// assert_eq!(env.get("PORT").map(String::as_str), Some("8080")); + /// assert_eq!(env.get("DEBUG").map(String::as_str), Some("true")); + /// } + /// # Ok(()) + /// # } /// ``` - pub fn parse_env_file(content: &str) -> Result, DockerError> { - let mut vars = HashMap::new(); - let valid_key = Regex::new(r"^[a-zA-Z_][a-zA-Z0-9_]*$").unwrap(); - - for line in content.lines() { - let line = line.trim(); - if line.is_empty() || line.starts_with('#') { - continue; - } - - if let Some((key, value)) = line.split_once('=') { - let key = key.trim(); - if valid_key.is_match(key) { - vars.insert(key.to_string(), value.trim().trim_matches('"').to_string()); - } - } - } - - Ok(vars) + pub fn env_file>(mut self, path: P) -> Self { + self.env_file_path = Some(path.as_ref().to_path_buf()); + self } - /// Parses a Docker Compose file with environment variable substitution from an env file - /// - /// # Arguments - /// - /// * `content` - The Docker Compose file content as a string - /// * `env_path` - Path to the environment variables file - /// - /// # Returns + /// Parses a Docker Compose file with environment variables from a HashMap /// - /// * `Result` - The parsed compose config with environment variables substituted + /// This method is useful when you want to provide environment variables + /// programmatically rather than from a file. 
/// - /// # Errors + /// # Arguments /// - /// Returns a `DockerError` if: - /// * The environment file cannot be read - /// * Environment variable parsing fails - /// * Environment variable substitution fails - /// * Compose file parsing fails + /// * `vars` - HashMap containing environment variable key-value pairs /// /// # Examples /// - /// ``` + /// ```rust + /// use dockworker::parser::compose::ComposeParser; + /// use std::collections::HashMap; /// use std::fs::write; /// use std::path::Path; - /// use tempfile::NamedTempFile; - /// # use dockworker::parser::compose::ComposeParser; /// # use dockworker::error::DockerError; /// /// # fn main() -> Result<(), DockerError> { @@ -294,11 +198,13 @@ impl ComposeParser { /// environment: /// - PORT=${PORT} /// - DEBUG=true"#; + /// let mut env_vars = HashMap::new(); + /// env_vars.insert(String::from("VERSION"), String::from("1.21")); + /// env_vars.insert(String::from("PORT"), String::from("8080")); /// - /// let env_file = NamedTempFile::new().unwrap(); - /// write(env_file.path(), b"VERSION=1.21\nPORT=8080").unwrap(); - /// - /// let config = ComposeParser::parse_with_env(compose_content, env_file.path())?; + /// let config = ComposeParser::new() + /// .env_vars(env_vars) + /// .parse(&mut compose_content.as_bytes())?; /// /// // Test map syntax service /// let app1 = config.services.get("app1").unwrap(); @@ -318,402 +224,40 @@ impl ComposeParser { /// # Ok(()) /// # } /// ``` - pub fn parse_with_env(content: &str, env_path: &Path) -> Result { - // Read environment variables from file - let env_content = std::fs::read_to_string(env_path) - .map_err(|e| DockerError::ValidationError(format!("Failed to read env file: {}", e)))?; - - // Parse environment variables using existing function - let env_vars = Self::parse_env_file(&env_content)?; - - // Substitute environment variables in the content - let content = Self::substitute_env_vars(content, &env_vars)?; - - // Parse the content with substituted environment 
variables - Self::parse(&content) + pub fn env_vars(mut self, vars: HashMap) -> Self { + self.env_vars = Some(vars); + self } +} - /// Substitutes environment variables in a string - /// - /// Supports the following formats: - /// - ${VAR} - /// - ${VAR:-default} - /// - $VAR - /// - /// # Arguments - /// - /// * `content` - The string containing environment variable references - /// * `env_vars` - HashMap of environment variables to use for substitution - fn substitute_env_vars( - content: &str, - env_vars: &HashMap, - ) -> Result { - let mut result = content.to_string(); - - // Handle ${VAR:-default} syntax - let re_with_default = Regex::new(r"\$\{([^{}:]+):-([^{}]*)\}").unwrap(); - result = re_with_default - .replace_all(&result, |caps: ®ex::Captures| { - let var_name = caps.get(1).unwrap().as_str(); - let default_value = caps.get(2).unwrap().as_str(); - match env_vars.get(var_name) { - Some(value) if value.is_empty() => default_value.to_string(), - Some(value) => value.to_string(), - None => default_value.to_string(), - } - }) - .to_string(); - - // Handle ${VAR} syntax - let re_simple = Regex::new(r"\$\{([^{}]+)\}").unwrap(); - result = re_simple - .replace_all(&result, |caps: ®ex::Captures| { - let var_name = caps.get(1).unwrap().as_str(); - env_vars - .get(var_name) - .map(|v| v.as_str()) - .unwrap_or("") - .to_string() - }) - .to_string(); - - // Handle $VAR syntax - let re_basic = Regex::new(r"\$([a-zA-Z_][a-zA-Z0-9_]*)").unwrap(); - result = re_basic - .replace_all(&result, |caps: ®ex::Captures| { - let var_name = caps.get(1).unwrap().as_str(); - env_vars - .get(var_name) - .map(|v| v.as_str()) - .unwrap_or("") - .to_string() - }) - .to_string(); - - Ok(result) - } - - /// Validates that all required environment variables are present - fn validate_required_env_vars( - config: &ComposeConfig, - env_vars: &HashMap, - ) -> Result<(), DockerError> { - let mut required_vars = std::collections::HashSet::new(); - - // Collect all required environment variables 
from the compose file - for service in config.services.values() { - if let Some(env) = &service.environment { - for (key, value) in env { - if value.contains("${") && !value.contains(":-") { - required_vars.insert(key.clone()); - } +/// Validates that all required environment variables are present +fn validate_required_env_vars( + config: &ComposeConfig, + env_vars: &HashMap, +) -> Result<(), DockerError> { + let mut required_vars = std::collections::HashSet::new(); + + // Collect all required environment variables from the compose file + for service in config.services.values() { + if let Some(env) = &service.environment { + for (key, value) in env { + if value.contains("${") && !value.contains(":-") { + required_vars.insert(key.clone()); } } } - - // Check if all required variables are present - let env_keys: std::collections::HashSet<_> = env_vars.keys().cloned().collect(); - let missing_vars: Vec<_> = required_vars.difference(&env_keys).collect(); - - if !missing_vars.is_empty() { - return Err(DockerError::ValidationError(format!( - "Missing required environment variables: {:?}", - missing_vars - ))); - } - - Ok(()) - } -} - -#[cfg(test)] -#[allow(clippy::literal_string_with_formatting_args)] -mod tests { - use super::*; - use std::fs; - use tempfile::NamedTempFile; - - #[test] - fn test_basic_env_substitution() { - let mut env_vars = HashMap::new(); - env_vars.insert("IMAGE_TAG__L2GETH".to_string(), "v1.0.0".to_string()); - env_vars.insert("SIMPLE_VAR".to_string(), "value".to_string()); - - let content = r#" - services: - l2geth: - image: ethereumoptimism/l2geth:${IMAGE_TAG__L2GETH:-latest} - other: - image: something:${UNDEFINED_VAR:-default} - simple: - value: $SIMPLE_VAR - "#; - - let result = ComposeParser::substitute_env_vars(content, &env_vars).unwrap(); - - assert!(result.contains("ethereumoptimism/l2geth:v1.0.0")); - assert!(result.contains("something:default")); - assert!(result.contains("value: value")); - } - - #[test] - fn 
test_nested_env_substitution() { - let mut env_vars = HashMap::new(); - env_vars.insert("PORT".to_string(), "8545".to_string()); - env_vars.insert("HOST".to_string(), "localhost".to_string()); - - let content = r#" - services: - geth: - ports: - - "${PORT:-3000}:${PORT:-3000}" - environment: - - URL=http://${HOST:-127.0.0.1}:${PORT:-3000} - - SIMPLE=$HOST:$PORT - "#; - - let result = ComposeParser::substitute_env_vars(content, &env_vars).unwrap(); - - assert!(result.contains("8545:8545")); - assert!(result.contains("http://localhost:8545")); - assert!(result.contains("SIMPLE=localhost:8545")); - } - - #[test] - fn test_env_file_parsing() { - let env_content = r#" - # Comment line - EMPTY= - QUOTED="quoted value" - UNQUOTED=unquoted value - WITH_SPACES= spaced value - "#; - - let temp_file = NamedTempFile::new().unwrap(); - fs::write(&temp_file, env_content).unwrap(); - - let vars = ComposeParser::parse_env_file(env_content).unwrap(); - - assert_eq!(vars.get("EMPTY").unwrap(), ""); - assert_eq!(vars.get("QUOTED").unwrap(), "quoted value"); - assert_eq!(vars.get("UNQUOTED").unwrap(), "unquoted value"); - assert_eq!(vars.get("WITH_SPACES").unwrap(), "spaced value"); - } - - #[test] - fn test_complex_substitutions() { - let mut env_vars = HashMap::new(); - env_vars.insert("VERSION".to_string(), "1.0".to_string()); - env_vars.insert("MEMORY".to_string(), "1G".to_string()); - - let content = r#" - services: - app: - image: myapp:${VERSION:-latest} - deploy: - requirements: - limits: - memory: ${MEMORY:-512M} - cpus: ${CPUS:-1.0} - environment: - - CONFIG=${CONFIG_PATH:-/etc/config} - - COMBINED=${VERSION:-0.0.1}-${MEMORY:-256M} - "#; - - let result = ComposeParser::substitute_env_vars(content, &env_vars).unwrap(); - - assert!(result.contains("myapp:1.0")); - assert!(result.contains("memory: 1G")); - assert!(result.contains("cpus: 1.0")); // Uses default - assert!(result.contains("CONFIG=/etc/config")); // Uses default - assert!(result.contains("COMBINED=1.0-1G")); - } - 
- #[test] - fn test_invalid_env_file() { - let env_content = r#" - VALID_KEY=value - INVALID+KEY=value - 123INVALID=value - _VALID=value - ALSO-INVALID=value - "#; - let vars = ComposeParser::parse_env_file(env_content).unwrap(); - - assert!(vars.contains_key("VALID_KEY")); - assert!(vars.contains_key("_VALID")); - assert!(!vars.contains_key("INVALID+KEY")); - assert!(!vars.contains_key("123INVALID")); - assert!(!vars.contains_key("ALSO-INVALID")); - assert_eq!(vars.len(), 2); - } - - #[test] - fn test_empty_and_missing_variables() { - let mut env_vars = HashMap::new(); - env_vars.insert("EMPTY".to_string(), "".to_string()); - - let content = r#" - services: - app: - image: test:${EMPTY:-default} - command: ${MISSING} - environment: - - UNSET=${UNDEFINED:-} - - WITH_DEFAULT=${UNDEFINED:-default_value} - "#; - - let result = ComposeParser::substitute_env_vars(content, &env_vars).unwrap(); - - assert!( - result.contains("test:default"), - "Empty var should use default" - ); - assert!(result.contains("command: "), "Missing var should be empty"); - assert!( - result.contains("UNSET="), - "Undefined with empty default should be empty" - ); - assert!( - result.contains("WITH_DEFAULT=default_value"), - "Undefined should use default value" - ); - } - - #[test] - fn test_empty_default_values() { - let env_vars = HashMap::new(); - let content = r#" - TEST1=${VAR:-} - TEST2=${OTHER_VAR:-default} - "#; - - let result = ComposeParser::substitute_env_vars(content, &env_vars).unwrap(); - assert!(result.contains("TEST1=")); - assert!(result.contains("TEST2=default")); - } - - #[test] - fn test_full_compose_parsing() { - let compose_content = r#" - version: "3.8" - services: - web: - image: nginx:${NGINX_VERSION:-latest} - ports: - - "${PORT:-80}:80" - "#; - - let env_content = "NGINX_VERSION=1.21\nPORT=8080"; - let temp_file = NamedTempFile::new().unwrap(); - fs::write(&temp_file, env_content).unwrap(); - - let config = ComposeParser::parse_with_env(compose_content, 
temp_file.path()).unwrap(); - - if let Some(web_service) = config.services.get("web") { - assert_eq!(web_service.image.as_deref().unwrap(), "nginx:1.21"); - assert_eq!( - web_service.ports.as_ref().unwrap().first().unwrap(), - "8080:80" - ); - } else { - panic!("Web service not found in parsed config"); - } } - #[test] - fn test_environment_variable_formats() { - // Test both map and list formats - let content = r#"version: "3" -services: - app1: - environment: - KEY1: value1 - KEY2: value2 - app2: - environment: - - KEY3=value3 - - KEY4=value4"#; - - let config = ComposeParser::parse(content).unwrap(); - - // Check map format - let app1 = config.services.get("app1").unwrap(); - if let Some(env) = &app1.environment { - assert_eq!(env.get("KEY1").map(String::as_str), Some("value1")); - assert_eq!(env.get("KEY2").map(String::as_str), Some("value2")); - } else { - panic!("app1 environment should be Some"); - } + // Check if all required variables are present + let env_keys: std::collections::HashSet<_> = env_vars.keys().cloned().collect(); + let missing_vars: Vec<_> = required_vars.difference(&env_keys).collect(); - // Check list format - let app2 = config.services.get("app2").unwrap(); - if let Some(env) = &app2.environment { - assert_eq!(env.get("KEY3").map(String::as_str), Some("value3")); - assert_eq!(env.get("KEY4").map(String::as_str), Some("value4")); - } else { - panic!("app2 environment should be Some"); - } - } - - #[test] - fn test_environment_variable_edge_cases() { - let content = r#"version: "3" -services: - app1: - environment: - EMPTY: "" - QUOTED: "quoted value" - SPACES: " value with spaces " - app2: - environment: - - EMPTY= - - QUOTED="quoted value" - - SPACES=" value with spaces ""#; - - let config = ComposeParser::parse(content).unwrap(); - - // Test both formats handle edge cases the same way - for service_name in ["app1", "app2"] { - let service = config.services.get(service_name).unwrap(); - if let Some(env) = &service.environment { - 
assert_eq!(env.get("EMPTY").map(String::as_str), Some("")); - assert_eq!(env.get("QUOTED").map(String::as_str), Some("quoted value")); - assert_eq!( - env.get("SPACES").map(String::as_str), - Some(" value with spaces ") - ); - } else { - panic!("{} environment should be Some", service_name); - } - } + if !missing_vars.is_empty() { + return Err(DockerError::ValidationError(format!( + "Missing required environment variables: {:?}", + missing_vars + ))); } - #[test] - fn test_environment_variable_substitution() { - let content = r#"version: "3" -services: - app1: - image: nginx:${VERSION:-latest} - environment: - PORT: "${PORT:-8080}" - DEBUG: "${DEBUG:-false}""#; - - let mut env_vars = HashMap::new(); - env_vars.insert("VERSION".to_string(), "1.21".to_string()); - env_vars.insert("DEBUG".to_string(), "true".to_string()); - - let processed = ComposeParser::substitute_env_vars(content, &env_vars).unwrap(); - let mut config = ComposeParser::parse(&processed).unwrap(); - config.resolve_env(&env_vars); - - let app1 = config.services.get("app1").unwrap(); - assert_eq!(app1.image.as_deref(), Some("nginx:1.21")); - if let Some(env) = &app1.environment { - assert_eq!(env.get("PORT").map(String::as_str), Some("8080")); // Uses default - assert_eq!(env.get("DEBUG").map(String::as_str), Some("true")); // Uses env var - } else { - panic!("app1 environment should be Some"); - } - } + Ok(()) } diff --git a/src/tests/compose.rs b/src/parser/compose/tests.rs similarity index 54% rename from src/tests/compose.rs rename to src/parser/compose/tests.rs index 52b46da..18734e3 100644 --- a/src/tests/compose.rs +++ b/src/parser/compose/tests.rs @@ -1,15 +1,14 @@ -use crate::{ - builder::compose::parse_memory_string, parser::ComposeParser, - tests::docker_file::is_docker_running, with_docker_cleanup, VolumeType, -}; -use bollard::container::ListContainersOptions; -use std::{collections::HashMap, path::PathBuf, time::Duration}; +#![allow(clippy::literal_string_with_formatting_args)] -use 
crate::{BuildConfig, ComposeConfig, DockerBuilder, Service}; +use crate::parser::env; +use crate::parser::ComposeParser; +use crate::test_fixtures::{get_local_reth_compose, get_reth_archive_compose}; +use crate::{ComposeConfig, Service, Volume}; +use std::collections::HashMap; +use std::fs; +use std::path::PathBuf; +use tempfile::NamedTempFile; -use super::fixtures::{get_local_reth_compose, get_reth_archive_compose}; - -// Sync tests that don't need Docker cleanup #[test] fn test_compose_parsing() { let yaml = r#" @@ -50,7 +49,7 @@ fn test_compose_parsing() { assert_eq!(volumes.len(), 1); match &volumes[0] { - VolumeType::Bind { + Volume::Bind { source, target, read_only, @@ -65,8 +64,8 @@ fn test_compose_parsing() { #[test] fn test_reth_archive_compose_parsing() { - let content = std::fs::read_to_string(get_reth_archive_compose()).unwrap(); - let config = ComposeParser::parse(&content).unwrap(); + let content = std::fs::read(get_reth_archive_compose()).unwrap(); + let config = ComposeParser::new().parse(&mut content.as_slice()).unwrap(); assert_eq!(config.version, "2"); assert_eq!(config.services.len(), 2); @@ -109,22 +108,20 @@ fn test_reth_archive_compose_parsing() { // Test reth service volumes let reth_volumes = reth.volumes.as_ref().unwrap(); assert_eq!(reth_volumes.len(), 2); - assert!(matches!(&reth_volumes[0], VolumeType::Named(name) if name == "reth_data:/data")); - assert!(matches!(&reth_volumes[1], VolumeType::Named(name) if name == "reth_jwt:/jwt:ro")); + assert!(matches!(&reth_volumes[0], Volume::Named(name) if name == "reth_data:/data")); + assert!(matches!(&reth_volumes[1], Volume::Named(name) if name == "reth_jwt:/jwt:ro")); // Test nimbus service volumes let nimbus_volumes = nimbus.volumes.as_ref().unwrap(); assert_eq!(nimbus_volumes.len(), 2); - assert!(matches!(&nimbus_volumes[0], VolumeType::Named(name) if name == "nimbus_data:/data")); - assert!( - matches!(&nimbus_volumes[1], VolumeType::Named(name) if name == "reth_jwt:/jwt/reth:ro") - ); + 
assert!(matches!(&nimbus_volumes[0], Volume::Named(name) if name == "nimbus_data:/data")); + assert!(matches!(&nimbus_volumes[1], Volume::Named(name) if name == "reth_jwt:/jwt/reth:ro")); } #[test] fn test_local_reth_compose_parsing() { - let content = std::fs::read_to_string(get_local_reth_compose()).unwrap(); - let config = ComposeParser::parse(&content).unwrap(); + let content = std::fs::read(get_local_reth_compose()).unwrap(); + let config = ComposeParser::new().parse(&mut content.as_slice()).unwrap(); assert_eq!(config.version, "3.9"); assert_eq!(config.services.len(), 3); @@ -167,133 +164,6 @@ fn test_local_reth_compose_parsing() { assert!(config.volumes.contains_key("grafanadata")); } -with_docker_cleanup!(test_compose_deployment, async |test_id: &str| { - if !is_docker_running() { - println!("Skipping test: Docker is not running"); - return; - } - - let builder = DockerBuilder::new().await.unwrap(); - let network_name = format!("test-network-{}", test_id); - - let mut labels = HashMap::new(); - labels.insert("test_id".to_string(), test_id.to_string()); - - // Create network with retry mechanism - builder - .create_network_with_retry(&network_name, 3, Duration::from_secs(2), Some(labels)) - .await - .unwrap(); - - // Create a simple test compose config - let mut services = HashMap::new(); - let mut env = HashMap::new(); - env.insert("TEST".to_string(), "value".to_string()); - - let mut labels = HashMap::new(); - labels.insert("test_id".to_string(), test_id.to_string()); - - let service_name = format!("test-service-{}", test_id); - services.insert( - service_name, - Service { - image: Some("alpine:latest".to_string()), - ports: Some(vec!["8080:80".to_string()]), - environment: Some(env.into()), - volumes: None, - networks: Some(vec![network_name.clone()]), - labels: Some(labels), - ..Service::default() - }, - ); - - let mut config = ComposeConfig { - version: "3".to_string(), - services, - volumes: HashMap::new(), - }; - - let container_ids = 
builder.deploy_compose(&mut config).await.unwrap(); - assert_eq!(container_ids.len(), 1); - - // Add a small delay to ensure Docker has time to start the container - tokio::time::sleep(Duration::from_millis(100)).await; - - // Verify containers are running - for (_, container_id) in container_ids { - let mut filters = HashMap::new(); - filters.insert("id".to_string(), vec![container_id.clone()]); - filters.insert("label".to_string(), vec![format!("test_id={}", test_id)]); - - let mut retries = 5; - let mut containers_found = false; - while retries > 0 { - match builder - .get_client() - .list_containers(Some(ListContainersOptions { - all: true, - filters: filters.clone(), - ..Default::default() - })) - .await - { - Ok(containers) => { - if containers.len() == 1 && containers[0].id.as_ref().unwrap() == &container_id - { - containers_found = true; - break; - } - } - Err(e) => println!("Error listing containers: {:?}", e), - } - retries -= 1; - tokio::time::sleep(Duration::from_millis(100)).await; - } - - assert!(containers_found, "Container not found or not running"); - } -}); - -with_docker_cleanup!(test_compose_with_build, async |_: &str| { - let builder = DockerBuilder::new().await.unwrap(); - - // Create a compose config with build context - let mut services = HashMap::new(); - services.insert( - "test-build-service".to_string(), - Service { - image: None, - build: Some(BuildConfig { - context: "./".to_string(), - dockerfile: Some("Dockerfile".to_string()), - }), - ports: None, - environment: None, - volumes: None, - networks: None, - requirements: None, - depends_on: None, - healthcheck: None, - restart: None, - command: None, - user: None, - labels: None, - platform: None, - env_file: None, - }, - ); - - let mut config = ComposeConfig { - version: "3".to_string(), - services, - volumes: HashMap::new(), - }; - - let result = builder.deploy_compose(&mut config).await; - // This should fail because we don't have a Dockerfile in the current directory - 
assert!(result.is_err()); -}); - #[test] fn test_volume_validation() { let mut config = ComposeConfig { @@ -304,8 +174,8 @@ fn test_volume_validation() { let service = Service { volumes: Some(vec![ - VolumeType::Named("test-data:/data".to_string()), - VolumeType::Bind { + Volume::Named("test-data:/data".to_string()), + Volume::Bind { source: PathBuf::from("/host").to_string_lossy().to_string(), target: "/container".to_string(), read_only: false, @@ -332,7 +202,7 @@ fn test_volume_validation() { // Sync tests that don't need Docker cleanup #[test] fn test_volume_serialization() { - let volume = VolumeType::Bind { + let volume = Volume::Bind { source: PathBuf::from("/host").to_string_lossy().to_string(), target: "/container".to_string(), read_only: true, @@ -354,7 +224,7 @@ fn test_service_deployment() { let mut config = ComposeConfig::default(); let service = Service { image: Some("nginx:latest".to_string()), - volumes: Some(vec![crate::VolumeType::Bind { + volumes: Some(vec![crate::Volume::Bind { source: PathBuf::from("/host/data").to_string_lossy().to_string(), target: "/container/data".to_string(), read_only: false, @@ -367,8 +237,130 @@ fn test_service_deployment() { } #[test] -fn test_memory_string_parsing() { - assert_eq!(parse_memory_string("512M").unwrap(), 512 * 1024 * 1024); - assert_eq!(parse_memory_string("1G").unwrap(), 1024 * 1024 * 1024); - assert!(parse_memory_string("invalid").is_err()); +fn test_full_compose_parsing() { + let compose_content = r#" + version: "3.8" + services: + web: + image: nginx:${NGINX_VERSION:-latest} + ports: + - "${PORT:-80}:80" + "#; + + let env_content = "NGINX_VERSION=1.21\nPORT=8080"; + let temp_file = NamedTempFile::new().unwrap(); + fs::write(&temp_file, env_content).unwrap(); + + let config = ComposeParser::new() + .env_file(temp_file.path()) + .parse(&mut compose_content.as_bytes()) + .unwrap(); + + if let Some(web_service) = config.services.get("web") { + assert_eq!(web_service.image.as_deref().unwrap(), 
"nginx:1.21"); + assert_eq!( + web_service.ports.as_ref().unwrap().first().unwrap(), + "8080:80" + ); + } else { + panic!("Web service not found in parsed config"); + } +} + +#[test] +fn test_environment_variable_formats() { + // Test both map and list formats + let content = r#"version: "3" +services: + app1: + environment: + KEY1: value1 + KEY2: value2 + app2: + environment: + - KEY3=value3 + - KEY4=value4"#; + + let config = ComposeParser::new().parse(&mut content.as_bytes()).unwrap(); + + // Check map format + let app1 = config.services.get("app1").unwrap(); + if let Some(env) = &app1.environment { + assert_eq!(env.get("KEY1").map(String::as_str), Some("value1")); + assert_eq!(env.get("KEY2").map(String::as_str), Some("value2")); + } else { + panic!("app1 environment should be Some"); + } + + // Check list format + let app2 = config.services.get("app2").unwrap(); + if let Some(env) = &app2.environment { + assert_eq!(env.get("KEY3").map(String::as_str), Some("value3")); + assert_eq!(env.get("KEY4").map(String::as_str), Some("value4")); + } else { + panic!("app2 environment should be Some"); + } +} + +#[test] +fn test_environment_variable_edge_cases() { + let content = r#"version: "3" +services: + app1: + environment: + EMPTY: "" + QUOTED: "quoted value" + SPACES: " value with spaces " + app2: + environment: + - EMPTY= + - QUOTED="quoted value" + - SPACES=" value with spaces ""#; + + let config = ComposeParser::new().parse(&mut content.as_bytes()).unwrap(); + + // Test both formats handle edge cases the same way + for service_name in ["app1", "app2"] { + let service = config.services.get(service_name).unwrap(); + if let Some(env) = &service.environment { + assert_eq!(env.get("EMPTY").map(String::as_str), Some("")); + assert_eq!(env.get("QUOTED").map(String::as_str), Some("quoted value")); + assert_eq!( + env.get("SPACES").map(String::as_str), + Some(" value with spaces ") + ); + } else { + panic!("{} environment should be Some", service_name); + } + } +} + 
+#[test] +fn test_environment_variable_substitution() { + let content = r#"version: "3" +services: + app1: + image: nginx:${VERSION:-latest} + environment: + PORT: "${PORT:-8080}" + DEBUG: "${DEBUG:-false}""#; + + let mut env_vars = HashMap::new(); + env_vars.insert("VERSION".to_string(), "1.21".to_string()); + env_vars.insert("DEBUG".to_string(), "true".to_string()); + + let processed = env::substitute_env_vars(content, &env_vars).unwrap(); + let mut config = ComposeParser::new() + .parse(&mut processed.as_bytes()) + .unwrap(); + config.resolve_env(&env_vars); + + let app1 = config.services.get("app1").unwrap(); + assert_eq!(app1.image.as_deref(), Some("nginx:1.21")); + if let Some(env) = &app1.environment { + assert_eq!(env.get("PORT").map(String::as_str), Some("8080")); // Uses default + assert_eq!(env.get("DEBUG").map(String::as_str), Some("true")); // Uses env var + } else { + panic!("app1 environment should be Some"); + } } diff --git a/src/parser/docker_file.rs b/src/parser/docker_file.rs index 7a2a34b..f228c8a 100644 --- a/src/parser/docker_file.rs +++ b/src/parser/docker_file.rs @@ -1,413 +1,378 @@ -use crate::{ - config::docker_file::{DockerCommand, DockerfileConfig}, - error::DockerError, -}; +#[cfg(test)] +mod tests; + +use crate::config::docker_file::{DockerCommand, DockerfileConfig}; +use crate::error::DockerError; use std::collections::HashMap; -pub struct DockerfileParser; +pub fn parse(content: &str) -> Result { + let mut config = DockerfileConfig { + base_image: String::new(), + commands: Vec::new(), + }; -impl DockerfileParser { - /// A parser for Dockerfile syntax. - /// - /// This parser reads and interprets Dockerfile commands, converting them into a structured - /// [`DockerfileConfig`] representation. It handles basic Dockerfile syntax including: - /// - Line continuations with backslash - /// - Comments starting with # - /// - Basic Dockerfile commands like FROM, COPY, etc. 
- /// - /// # Example - /// - /// ```rust,no_run - /// use dockworker::parser::DockerfileParser; - /// - /// let content = r#" - /// FROM ubuntu:latest - /// COPY . /app - /// RUN cargo build - /// "#; - /// - /// let config = DockerfileParser::parse(content).unwrap(); - /// ``` - /// - /// # Errors - /// - /// Returns [`DockerError::DockerfileError`] if: - /// - Command syntax is invalid - /// - Required arguments are missing - /// - Command is not recognized - pub fn parse(content: &str) -> Result { - let mut config = DockerfileConfig { - base_image: String::new(), - commands: Vec::new(), - }; + let mut current_command = String::new(); - let mut current_command = String::new(); + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } - for line in content.lines() { - let line = line.trim(); - if line.is_empty() || line.starts_with('#') { - continue; - } + // Handle line continuations + if let Some(s) = line.strip_suffix('\\') { + current_command.push_str(s); + current_command.push(' '); + continue; + } - // Handle line continuations - if let Some(s) = line.strip_suffix('\\') { - current_command.push_str(s); - current_command.push(' '); - continue; - } + if !current_command.is_empty() { + current_command.push_str(line); + parse_command(&mut config, ¤t_command)?; + current_command.clear(); + continue; + } - if !current_command.is_empty() { - current_command.push_str(line); - Self::parse_command(&mut config, ¤t_command)?; - current_command.clear(); - continue; - } + parse_command(&mut config, line)?; + } - Self::parse_command(&mut config, line)?; - } + Ok(config) +} - Ok(config) +fn parse_command(config: &mut DockerfileConfig, line: &str) -> Result<(), DockerError> { + let parts: Vec<&str> = line.splitn(2, ' ').collect(); + if parts.len() != 2 { + return Err(DockerError::DockerfileError( + "Invalid command syntax".to_string(), + )); } - fn parse_command(config: &mut DockerfileConfig, line: &str) -> 
Result<(), DockerError> { - let parts: Vec<&str> = line.splitn(2, ' ').collect(); - if parts.len() != 2 { - return Err(DockerError::DockerfileError( - "Invalid command syntax".to_string(), - )); + let (command, args) = (parts[0].to_uppercase(), parts[1].trim()); + match command.as_str() { + "FROM" => config.base_image = args.to_string(), + "COPY" => { + let parts: Vec<&str> = args.split_whitespace().collect(); + if parts.len() < 2 { + return Err(DockerError::DockerfileError( + "COPY requires source and destination".to_string(), + )); + } + let (chown, parts) = parse_chown_option(args); + let parts: Vec<&str> = parts.split_whitespace().collect(); + config.commands.push(DockerCommand::Copy { + source: parts[0].to_string(), + dest: parts[1].to_string(), + chown, + }); } + "EXPOSE" => { + // Split on whitespace first to handle multiple ports + let ports = args.split_whitespace(); + for port_spec in ports { + let parts: Vec<&str> = port_spec.split('/').collect(); + let port = parts[0].trim().parse::().map_err(|_| { + DockerError::DockerfileError(format!("Invalid port number: {}", parts[0])) + })?; - let (command, args) = (parts[0].to_uppercase(), parts[1].trim()); - match command.as_str() { - "FROM" => config.base_image = args.to_string(), - "COPY" => { - let parts: Vec<&str> = args.split_whitespace().collect(); - if parts.len() < 2 { - return Err(DockerError::DockerfileError( - "COPY requires source and destination".to_string(), - )); - } - let (chown, parts) = Self::parse_chown_option(args); - let parts: Vec<&str> = parts.split_whitespace().collect(); - config.commands.push(DockerCommand::Copy { - source: parts[0].to_string(), - dest: parts[1].to_string(), - chown, + config.commands.push(DockerCommand::Expose { + port, + protocol: parts.get(1).map(|p| p.trim().to_string()), }); } - "EXPOSE" => { - // Split on whitespace first to handle multiple ports - let ports = args.split_whitespace(); - for port_spec in ports { - let parts: Vec<&str> = 
port_spec.split('/').collect(); - let port = parts[0].trim().parse::().map_err(|_| { - DockerError::DockerfileError(format!("Invalid port number: {}", parts[0])) - })?; - - config.commands.push(DockerCommand::Expose { - port, - protocol: parts.get(1).map(|p| p.trim().to_string()), - }); - } + } + "ONBUILD" => { + let args = args.trim(); + if args.is_empty() { + return Err(DockerError::DockerfileError( + "ONBUILD requires one argument".to_string(), + )); } - "ONBUILD" => { - let args = args.trim(); - if args.is_empty() { - return Err(DockerError::DockerfileError( - "ONBUILD requires one argument".to_string(), - )); - } - // Create a new config to parse the ONBUILD command - let mut onbuild_config = DockerfileConfig { - base_image: String::new(), - commands: Vec::new(), - }; + // Create a new config to parse the ONBUILD command + let mut onbuild_config = DockerfileConfig { + base_image: String::new(), + commands: Vec::new(), + }; - // Parse the ONBUILD command recursively - Self::parse_command(&mut onbuild_config, args)?; + // Parse the ONBUILD command recursively + parse_command(&mut onbuild_config, args)?; - // Get the parsed command - if let Some(cmd) = onbuild_config.commands.pop() { - config.commands.push(DockerCommand::Onbuild { - command: Box::new(cmd), - }); - } else { - return Err(DockerError::DockerfileError( - "Invalid ONBUILD command".to_string(), - )); - } + // Get the parsed command + if let Some(cmd) = onbuild_config.commands.pop() { + config.commands.push(DockerCommand::Onbuild { + command: Box::new(cmd), + }); + } else { + return Err(DockerError::DockerfileError( + "Invalid ONBUILD command".to_string(), + )); } - "ADD" => { - let (chown, parts) = Self::parse_chown_option(args); - let sources_and_dest = if args.trim().starts_with('[') { - // Handle JSON array format - serde_json::from_str::>(args).map_err(|e| { - DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) - })? 
- } else { - // Handle space-separated format - shell_words::split(&parts) - .map_err(|e| DockerError::DockerfileError(e.to_string()))? - }; + } + "ADD" => { + let (chown, parts) = parse_chown_option(args); + let sources_and_dest = if args.trim().starts_with('[') { + // Handle JSON array format + serde_json::from_str::>(args).map_err(|e| { + DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) + })? + } else { + // Handle space-separated format + shell_words::split(&parts) + .map_err(|e| DockerError::DockerfileError(e.to_string()))? + }; - if sources_and_dest.len() >= 2 { - let dest = sources_and_dest.last().unwrap().to_string(); - let sources = sources_and_dest[..sources_and_dest.len() - 1].to_vec(); - config.commands.push(DockerCommand::Add { - sources, - dest, - chown, - }); - } else { - return Err(DockerError::DockerfileError( - "ADD requires at least one source and destination".to_string(), - )); - } - } - "ARG" => { - let parts: Vec<&str> = args.split('=').collect(); - config.commands.push(DockerCommand::Arg { - name: parts[0].trim().to_string(), - default_value: parts.get(1).map(|v| v.trim().to_string()), + if sources_and_dest.len() >= 2 { + let dest = sources_and_dest.last().unwrap().to_string(); + let sources = sources_and_dest[..sources_and_dest.len() - 1].to_vec(); + config.commands.push(DockerCommand::Add { + sources, + dest, + chown, }); + } else { + return Err(DockerError::DockerfileError( + "ADD requires at least one source and destination".to_string(), + )); } - "CMD" => { - let command = if args.trim().starts_with('[') { - // Handle JSON array format - serde_json::from_str::>(args).map_err(|e| { - DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) - })? - } else { - // Handle space-separated format - shell_words::split(args) - .map_err(|e| DockerError::DockerfileError(e.to_string()))? 
- }; - config.commands.push(DockerCommand::Cmd { command }); - } - "ENTRYPOINT" => { - let command = if args.trim().starts_with('[') { - // Handle JSON array format - serde_json::from_str::>(args).map_err(|e| { - DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) - })? - } else { - // Handle space-separated format - shell_words::split(args) - .map_err(|e| DockerError::DockerfileError(e.to_string()))? - }; - config.commands.push(DockerCommand::Entrypoint { command }); + } + "ARG" => { + let parts: Vec<&str> = args.split('=').collect(); + config.commands.push(DockerCommand::Arg { + name: parts[0].trim().to_string(), + default_value: parts.get(1).map(|v| v.trim().to_string()), + }); + } + "CMD" => { + let command = if args.trim().starts_with('[') { + // Handle JSON array format + serde_json::from_str::>(args).map_err(|e| { + DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) + })? + } else { + // Handle space-separated format + shell_words::split(args).map_err(|e| DockerError::DockerfileError(e.to_string()))? + }; + config.commands.push(DockerCommand::Cmd { command }); + } + "ENTRYPOINT" => { + let command = if args.trim().starts_with('[') { + // Handle JSON array format + serde_json::from_str::>(args).map_err(|e| { + DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) + })? + } else { + // Handle space-separated format + shell_words::split(args).map_err(|e| DockerError::DockerfileError(e.to_string()))? 
+ }; + config.commands.push(DockerCommand::Entrypoint { command }); + } + "ENV" => { + let parts: Vec<&str> = args.splitn(2, '=').collect(); + if parts.len() == 2 { + config.commands.push(DockerCommand::Env { + key: parts[0].trim().to_string(), + value: parts[1].trim().to_string(), + }); } - "ENV" => { - let parts: Vec<&str> = args.splitn(2, '=').collect(); - if parts.len() == 2 { - config.commands.push(DockerCommand::Env { - key: parts[0].trim().to_string(), - value: parts[1].trim().to_string(), - }); - } + } + "HEALTHCHECK" => { + if args.trim() == "NONE" { + return Ok(()); } - "HEALTHCHECK" => { - if args.trim() == "NONE" { - return Ok(()); - } - let mut parts = args.split_whitespace(); - let mut interval = None; - let mut timeout = None; - let mut start_period = None; - let mut retries = None; - let mut command = Vec::new(); + let mut parts = args.split_whitespace(); + let mut interval = None; + let mut timeout = None; + let mut start_period = None; + let mut retries = None; + let mut command = Vec::new(); - // Find CMD part - let mut found_cmd = false; - let mut cmd_parts = Vec::new(); - while let Some(part) = parts.next() { - if part == "CMD" { - found_cmd = true; - cmd_parts = parts.collect(); - break; - } - - // Handle --flag=value format - if part.starts_with("--") { - let flag_part: Vec<&str> = part.splitn(2, '=').collect(); - match flag_part[0] { - "--interval" | "--timeout" | "--start-period" => { - let value = if flag_part.len() == 2 { - Some(flag_part[1].to_string()) - } else { - parts.next().map(|s| s.to_string()) - }; - - if value.is_none() { - return Err(DockerError::DockerfileError(format!( - "Missing value for {} flag", - flag_part[0] - ))); - } + // Find CMD part + let mut found_cmd = false; + let mut cmd_parts = Vec::new(); + while let Some(part) = parts.next() { + if part == "CMD" { + found_cmd = true; + cmd_parts = parts.collect(); + break; + } - match flag_part[0] { - "--interval" => interval = value, - "--timeout" => timeout = value, - 
"--start-period" => start_period = value, - _ => unreachable!(), - } - } - "--retries" => { - let value = if flag_part.len() == 2 { - flag_part[1].parse().ok() - } else { - parts.next().and_then(|s| s.parse().ok()) - }; + // Handle --flag=value format + if part.starts_with("--") { + let flag_part: Vec<&str> = part.splitn(2, '=').collect(); + match flag_part[0] { + "--interval" | "--timeout" | "--start-period" => { + let value = if flag_part.len() == 2 { + Some(flag_part[1].to_string()) + } else { + parts.next().map(|s| s.to_string()) + }; - if value.is_none() { - return Err(DockerError::DockerfileError( - "Invalid value for --retries flag".to_string(), - )); - } - retries = value; - } - _ => { + if value.is_none() { return Err(DockerError::DockerfileError(format!( - "Invalid HEALTHCHECK flag: {}", + "Missing value for {} flag", flag_part[0] ))); } - } - continue; - } - } - - if !found_cmd { - return Err(DockerError::DockerfileError( - "HEALTHCHECK must include CMD".to_string(), - )); - } - - if let Ok(cmd) = shell_words::split(&cmd_parts.join(" ")) { - command = cmd; - } - - if !command.is_empty() { - config.commands.push(DockerCommand::Healthcheck { - command, - interval, - timeout, - start_period, - retries, - }); - } - } - "LABEL" => { - let mut labels = HashMap::new(); - let mut current_key = String::new(); - let mut current_value = String::new(); - let mut in_quotes = false; - for c in args.chars() { - match c { - '"' => in_quotes = !in_quotes, - '=' if !in_quotes && current_key.is_empty() => { - current_key = current_value.trim().to_string(); - current_value.clear(); + match flag_part[0] { + "--interval" => interval = value, + "--timeout" => timeout = value, + "--start-period" => start_period = value, + _ => unreachable!(), + } } - ' ' if !in_quotes && !current_key.is_empty() => { - if !current_value.is_empty() { - labels.insert( - current_key.trim_matches('"').to_string(), - current_value.trim_matches('"').to_string(), - ); - current_key.clear(); - 
current_value.clear(); + "--retries" => { + let value = if flag_part.len() == 2 { + flag_part[1].parse().ok() + } else { + parts.next().and_then(|s| s.parse().ok()) + }; + + if value.is_none() { + return Err(DockerError::DockerfileError( + "Invalid value for --retries flag".to_string(), + )); } + retries = value; } _ => { - current_value.push(c); + return Err(DockerError::DockerfileError(format!( + "Invalid HEALTHCHECK flag: {}", + flag_part[0] + ))); } } + continue; } - - // Handle the last key-value pair - if !current_key.is_empty() && !current_value.is_empty() { - labels.insert( - current_key.trim_matches('"').to_string(), - current_value.trim_matches('"').to_string(), - ); - } - - config.commands.push(DockerCommand::Label { labels }); - } - "MAINTAINER" => { - config.commands.push(DockerCommand::Maintainer { - name: args.to_string(), - }); - } - "RUN" => { - config.commands.push(DockerCommand::Run { - command: args.to_string(), - }); } - "SHELL" => { - let shell = if args.trim().starts_with('[') { - // Handle JSON array format - serde_json::from_str::>(args).map_err(|e| { - DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) - })? - } else { - // Handle space-separated format - shell_words::split(args) - .map_err(|e| DockerError::DockerfileError(e.to_string()))? 
- }; - config.commands.push(DockerCommand::Shell { shell }); + + if !found_cmd { + return Err(DockerError::DockerfileError( + "HEALTHCHECK must include CMD".to_string(), + )); } - "STOPSIGNAL" => { - config.commands.push(DockerCommand::StopSignal { - signal: args.to_string(), - }); + + if let Ok(cmd) = shell_words::split(&cmd_parts.join(" ")) { + command = cmd; } - "USER" => { - let parts: Vec<&str> = args.split(':').collect(); - config.commands.push(DockerCommand::User { - user: parts[0].to_string(), - group: parts.get(1).map(|g| g.to_string()), + + if !command.is_empty() { + config.commands.push(DockerCommand::Healthcheck { + command, + interval, + timeout, + start_period, + retries, }); } - "VOLUME" => { - let paths = if args.trim().starts_with('[') { - // Handle JSON array format - serde_json::from_str::>(args).map_err(|e| { - DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) - })? - } else { - // Handle space-separated format - shell_words::split(args) - .map_err(|e| DockerError::DockerfileError(e.to_string()))? 
- }; - config.commands.push(DockerCommand::Volume { paths }); - } - "WORKDIR" => { - config.commands.push(DockerCommand::Workdir { - path: args.to_string(), - }); + } + "LABEL" => { + let mut labels = HashMap::new(); + let mut current_key = String::new(); + let mut current_value = String::new(); + let mut in_quotes = false; + + for c in args.chars() { + match c { + '"' => in_quotes = !in_quotes, + '=' if !in_quotes && current_key.is_empty() => { + current_key = current_value.trim().to_string(); + current_value.clear(); + } + ' ' if !in_quotes && !current_key.is_empty() => { + if !current_value.is_empty() { + labels.insert( + current_key.trim_matches('"').to_string(), + current_value.trim_matches('"').to_string(), + ); + current_key.clear(); + current_value.clear(); + } + } + _ => { + current_value.push(c); + } + } } - _ => { - return Err(DockerError::DockerfileError(format!( - "Unknown command: {}", - command - ))); + + // Handle the last key-value pair + if !current_key.is_empty() && !current_value.is_empty() { + labels.insert( + current_key.trim_matches('"').to_string(), + current_value.trim_matches('"').to_string(), + ); } + + config.commands.push(DockerCommand::Label { labels }); + } + "MAINTAINER" => { + config.commands.push(DockerCommand::Maintainer { + name: args.to_string(), + }); + } + "RUN" => { + config.commands.push(DockerCommand::Run { + command: args.to_string(), + }); + } + "SHELL" => { + let shell = if args.trim().starts_with('[') { + // Handle JSON array format + serde_json::from_str::>(args).map_err(|e| { + DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) + })? + } else { + // Handle space-separated format + shell_words::split(args).map_err(|e| DockerError::DockerfileError(e.to_string()))? 
+ }; + config.commands.push(DockerCommand::Shell { shell }); + } + "STOPSIGNAL" => { + config.commands.push(DockerCommand::StopSignal { + signal: args.to_string(), + }); + } + "USER" => { + let parts: Vec<&str> = args.split(':').collect(); + config.commands.push(DockerCommand::User { + user: parts[0].to_string(), + group: parts.get(1).map(|g| g.to_string()), + }); + } + "VOLUME" => { + let paths = if args.trim().starts_with('[') { + // Handle JSON array format + serde_json::from_str::>(args).map_err(|e| { + DockerError::DockerfileError(format!("Invalid JSON array: {}", e)) + })? + } else { + // Handle space-separated format + shell_words::split(args).map_err(|e| DockerError::DockerfileError(e.to_string()))? + }; + config.commands.push(DockerCommand::Volume { paths }); + } + "WORKDIR" => { + config.commands.push(DockerCommand::Workdir { + path: args.to_string(), + }); + } + _ => { + return Err(DockerError::DockerfileError(format!( + "Unknown command: {}", + command + ))); } - Ok(()) } + Ok(()) +} - fn parse_chown_option(args: &str) -> (Option, String) { - if args.starts_with("--chown=") { - let parts: Vec<&str> = args.splitn(2, ' ').collect(); - let chown = parts[0].trim_start_matches("--chown=").to_string(); - (Some(chown), parts.get(1).unwrap_or(&"").to_string()) - } else { - (None, args.to_string()) - } +fn parse_chown_option(args: &str) -> (Option, String) { + if args.starts_with("--chown=") { + let parts: Vec<&str> = args.splitn(2, ' ').collect(); + let chown = parts[0].trim_start_matches("--chown=").to_string(); + (Some(chown), parts.get(1).unwrap_or(&"").to_string()) + } else { + (None, args.to_string()) } } diff --git a/src/tests/docker_file.rs b/src/parser/docker_file/tests.rs similarity index 74% rename from src/tests/docker_file.rs rename to src/parser/docker_file/tests.rs index 85a450c..efe566b 100644 --- a/src/tests/docker_file.rs +++ b/src/parser/docker_file/tests.rs @@ -1,30 +1,11 @@ use crate::config::docker_file::{DockerCommand, DockerfileConfig}; 
-use crate::{ - parser::docker_file::DockerfileParser, with_docker_cleanup, DockerBuilder, DockerError, -}; -use bollard::container::ListContainersOptions; -use futures_util::TryStreamExt; +use crate::test_fixtures::get_tangle_dockerfile; +use crate::DockerError; use std::collections::HashMap; -use std::process::Command; -use std::time::Duration; - -use super::fixtures::get_tangle_dockerfile; - -pub fn is_docker_running() -> bool { - Command::new("docker") - .arg("info") - .output() - .map(|output| output.status.success()) - .unwrap_or(false) -} #[tokio::test] async fn test_dockerfile_parsing() { - let builder = DockerBuilder::new().await.unwrap(); - let config = builder - .from_dockerfile(get_tangle_dockerfile()) - .await - .unwrap(); + let config = DockerfileConfig::parse_from_path(get_tangle_dockerfile()).unwrap(); assert_eq!(config.base_image, "ubuntu:22.04"); assert_eq!(config.commands.len(), 11); @@ -77,129 +58,6 @@ async fn test_dockerfile_parsing() { } } -with_docker_cleanup!(test_dockerfile_deployment, async |test_id: &str| { - if !is_docker_running() { - println!("Skipping test: Docker is not running"); - return; - } - - let builder = DockerBuilder::new().await.unwrap(); - let network_name = format!("test-network-{}", test_id); - - let mut network_labels = HashMap::new(); - network_labels.insert("test_id".to_string(), test_id.to_string()); - - // Create network with retry mechanism - builder - .create_network_with_retry( - &network_name, - 3, - Duration::from_secs(2), - Some(network_labels), - ) - .await - .unwrap(); - - // Pull alpine image first - println!("Pulling alpine image..."); - builder - .get_client() - .create_image( - Some(bollard::image::CreateImageOptions { - from_image: "alpine", - tag: "latest", - ..Default::default() - }), - None, - None, - ) - .try_collect::>() - .await - .unwrap(); - println!("Image pull complete"); - - // Create a simple test Dockerfile config - let config = DockerfileConfig { - base_image: "alpine:latest".to_string(), 
- commands: vec![ - DockerCommand::Run { - command: "echo 'test' > /test.txt".to_string(), - }, - DockerCommand::Label { - labels: { - let mut labels = HashMap::new(); - labels.insert("test_id".to_string(), test_id.to_string()); - labels - }, - }, - DockerCommand::Cmd { - command: vec!["sleep".to_string(), "30".to_string()], // Keep container running - }, - ], - }; - - let tag = format!("test-dockerfile-{}", test_id); - println!("Building image with tag: {}", tag); - - // Deploy using our config with network - let container_id = builder - .deploy_dockerfile(&config, &tag, None, None, Some(network_name.clone()), None) - .await - .unwrap(); - println!("Container created with ID: {}", container_id); - - // Add a small delay after creation - tokio::time::sleep(Duration::from_millis(100)).await; - - // Verify container is running - let mut filters = std::collections::HashMap::new(); - filters.insert("id".to_string(), vec![container_id.clone()]); - filters.insert("label".to_string(), vec![format!("test_id={}", test_id)]); - - let mut retries = 5; - let mut container_running = false; - while retries > 0 { - println!("Checking container state, attempt {}", 6 - retries); - if let Ok(containers) = builder - .get_client() - .list_containers(Some(ListContainersOptions { - all: true, - filters: filters.clone(), - ..Default::default() - })) - .await - { - if !containers.is_empty() { - println!("Container found and running"); - container_running = true; - break; - } else { - println!("No containers found matching filters"); - } - } else { - println!("Error listing containers"); - } - retries -= 1; - tokio::time::sleep(Duration::from_millis(500)).await; - } - - // If container not running, get more details - if !container_running { - println!("Container not found with filters. 
Checking container inspect..."); - if let Ok(inspect) = builder - .get_client() - .inspect_container(&container_id, None) - .await - { - println!("Container inspect result: {:?}", inspect); - } else { - println!("Failed to inspect container"); - } - } - - assert!(container_running, "Container should be running"); -}); - #[tokio::test] async fn test_dockerfile_content_generation() { let config = DockerfileConfig { @@ -227,7 +85,7 @@ async fn test_dockerfile_content_generation() { ], }; - let content = config.to_dockerfile_content(); + let content = config.to_string(); let expected = r#"FROM rust:1.70 RUN cargo build COPY ./target /app @@ -267,7 +125,7 @@ async fn test_all_dockerfile_commands() { CMD ["--help"] "#; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); assert_eq!(config.base_image, "ubuntu:22.04"); let mut commands_iter = config.commands.iter(); @@ -375,8 +233,7 @@ async fn test_all_dockerfile_commands() { // Test RUN if let Some(DockerCommand::Run { command }) = commands_iter.next() { - assert!(command.contains("apt-get update")); - assert!(command.contains("apt-get install -y python3")); + assert_eq!(command, "apt-get update && apt-get install -y python3"); } else { panic!("Expected RUN command"); } @@ -481,21 +338,21 @@ async fn test_all_dockerfile_commands() { #[tokio::test] async fn test_invalid_dockerfile_syntax() { let content = "COPY"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "Invalid command syntax" )); let content = "COPY src"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "COPY requires source and destination" )); let content = "UNKNOWN command"; - let result = DockerfileParser::parse(content); + let result = 
DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "Unknown command: UNKNOWN" @@ -505,14 +362,14 @@ async fn test_invalid_dockerfile_syntax() { #[tokio::test] async fn test_invalid_onbuild() { let content = "ONBUILD"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "Invalid command syntax" )); let content = "ONBUILD INVALID something"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "Unknown command: INVALID" @@ -520,7 +377,7 @@ async fn test_invalid_onbuild() { // Test valid ONBUILD commands let content = "ONBUILD ADD . /usr/src/app"; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); match &config.commands[0] { DockerCommand::Onbuild { command } => match command.as_ref() { DockerCommand::Add { @@ -538,7 +395,7 @@ async fn test_invalid_onbuild() { } let content = "ONBUILD RUN mvn install"; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); match &config.commands[0] { DockerCommand::Onbuild { command } => match command.as_ref() { DockerCommand::Run { command } => { @@ -553,21 +410,21 @@ async fn test_invalid_onbuild() { #[tokio::test] async fn test_invalid_healthcheck() { let content = "HEALTHCHECK --invalid-flag CMD curl localhost"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "Invalid HEALTHCHECK flag: --invalid-flag" )); let content = "HEALTHCHECK --interval"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, 
Err(DockerError::DockerfileError(msg)) if msg == "Missing value for --interval flag" )); let content = "HEALTHCHECK --interval 30s"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "HEALTHCHECK must include CMD" @@ -577,7 +434,7 @@ async fn test_invalid_healthcheck() { #[tokio::test] async fn test_empty_dockerfile() { let content = ""; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); assert!(config.base_image.is_empty()); assert!(config.commands.is_empty()); } @@ -587,14 +444,14 @@ async fn test_comments_and_empty_lines() { let content = r#" # This is a comment FROM ubuntu:22.04 - + # Another comment RUN echo "test" - + # Final comment "#; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); assert_eq!(config.base_image, "ubuntu:22.04"); assert_eq!(config.commands.len(), 1); } @@ -602,7 +459,7 @@ async fn test_comments_and_empty_lines() { #[tokio::test] async fn test_expose_multiple_ports() { let content = "EXPOSE 30333 9933 9944 9615"; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); let expected_ports = [30333, 9933, 9944, 9615]; assert_eq!(config.commands.len(), expected_ports.len()); @@ -619,7 +476,7 @@ async fn test_expose_multiple_ports() { // Test mixed format let content = "EXPOSE 80/tcp 443 8080/udp 9000"; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); assert_eq!(config.commands.len(), 4); let expected = [ @@ -646,7 +503,7 @@ async fn test_expose_multiple_ports() { #[tokio::test] async fn test_tangle_expose_format() { let content = "EXPOSE 30333 9933 9944 9615"; - let config = DockerfileParser::parse(content).unwrap(); + let config = 
DockerfileConfig::parse(content).unwrap(); let expected_ports = [30333, 9933, 9944, 9615]; assert_eq!(config.commands.len(), expected_ports.len()); @@ -663,7 +520,7 @@ async fn test_tangle_expose_format() { // Test error case let content = "EXPOSE 30333 invalid 9944"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg == "Invalid port number: invalid" @@ -742,7 +599,7 @@ async fn test_onbuild_commands() { ]; for (content, expected_cmd) in test_cases { - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); match &config.commands[0] { DockerCommand::Onbuild { command } => { assert_eq!(command.as_ref(), &expected_cmd); @@ -756,7 +613,7 @@ async fn test_onbuild_commands() { async fn test_volume_formats() { // Test space-separated format let content = "VOLUME /data /config /cache"; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); match &config.commands[0] { DockerCommand::Volume { paths } => { assert_eq!(paths, &vec!["/data", "/config", "/cache"]); @@ -766,7 +623,7 @@ async fn test_volume_formats() { // Test JSON array format let content = r#"VOLUME ["/data", "/config"]"#; - let config = DockerfileParser::parse(content).unwrap(); + let config = DockerfileConfig::parse(content).unwrap(); match &config.commands[0] { DockerCommand::Volume { paths } => { assert_eq!(paths, &vec!["/data", "/config"]); @@ -776,7 +633,7 @@ async fn test_volume_formats() { // Test error case let content = "VOLUME [invalid json"; - let result = DockerfileParser::parse(content); + let result = DockerfileConfig::parse(content); assert!(matches!( result, Err(DockerError::DockerfileError(msg)) if msg.contains("Invalid JSON array") diff --git a/src/parser/env.rs b/src/parser/env.rs new file mode 100644 index 0000000..93590fd --- /dev/null +++ 
b/src/parser/env.rs @@ -0,0 +1,266 @@ +use crate::DockerError; +use regex::Regex; +use std::collections::HashMap; +use std::sync::LazyLock; + +/// Parses an environment file into a HashMap of key-value pairs +/// +/// # Format +/// +/// The following formats are supported: +/// - Comments (lines starting with #) +/// - Empty lines (ignored) +/// - KEY=value format +/// - Quoted values (quotes are stripped) +/// +/// Variable names must: +/// - Start with a letter or underscore +/// - Contain only alphanumeric characters and underscores +pub fn parse_env_file(content: &str) -> Result, DockerError> { + let mut vars = HashMap::new(); + let valid_key = Regex::new(r"^[a-zA-Z_][a-zA-Z0-9_]*$").unwrap(); + + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + + if let Some((key, value)) = line.split_once('=') { + let key = key.trim(); + if valid_key.is_match(key) { + vars.insert(key.to_string(), value.trim().trim_matches('"').to_string()); + } + } + } + + Ok(vars) +} + +/// Substitutes environment variables in a string +/// +/// Supports the following formats: +/// - ${VAR} +/// - ${VAR:-default} +/// - $VAR +pub fn substitute_env_vars( + content: &str, + env_vars: &HashMap, +) -> Result { + static VAR_DEFAULT_SYNTAX: LazyLock = + LazyLock::new(|| Regex::new(r"\$\{([^{}:]+):-([^{}]*)\}").unwrap()); + static VAR_CURLY_SYNTAX: LazyLock = + LazyLock::new(|| Regex::new(r"\$\{([^{}]+)\}").unwrap()); + static VAR_SYNTAX: LazyLock = + LazyLock::new(|| Regex::new(r"\$([a-zA-Z_][a-zA-Z0-9_]*)").unwrap()); + + let mut result = content.to_string(); + + // Handle ${VAR:-default} syntax + result = VAR_DEFAULT_SYNTAX + .replace_all(&result, |caps: ®ex::Captures| { + let var_name = caps.get(1).unwrap().as_str(); + let default_value = caps.get(2).unwrap().as_str(); + match env_vars.get(var_name) { + Some(value) if value.is_empty() => default_value.to_string(), + Some(value) => value.to_string(), + None => 
default_value.to_string(), + } + }) + .to_string(); + + // Handle ${VAR} syntax + result = VAR_CURLY_SYNTAX + .replace_all(&result, |caps: ®ex::Captures| { + let var_name = caps.get(1).unwrap().as_str(); + env_vars + .get(var_name) + .map(|v| v.as_str()) + .unwrap_or("") + .to_string() + }) + .to_string(); + + // Handle $VAR syntax + result = VAR_SYNTAX + .replace_all(&result, |caps: ®ex::Captures| { + let var_name = caps.get(1).unwrap().as_str(); + env_vars + .get(var_name) + .map(|v| v.as_str()) + .unwrap_or("") + .to_string() + }) + .to_string(); + + Ok(result) +} + +#[cfg(test)] +#[allow(clippy::literal_string_with_formatting_args)] +mod tests { + use super::*; + use std::collections::HashMap; + use std::fs; + use tempfile::NamedTempFile; + + #[test] + fn test_basic_env_substitution() { + let mut env_vars = HashMap::new(); + env_vars.insert("IMAGE_TAG__L2GETH".to_string(), "v1.0.0".to_string()); + env_vars.insert("SIMPLE_VAR".to_string(), "value".to_string()); + + let content = r#" + services: + l2geth: + image: ethereumoptimism/l2geth:${IMAGE_TAG__L2GETH:-latest} + other: + image: something:${UNDEFINED_VAR:-default} + simple: + value: $SIMPLE_VAR + "#; + + let result = substitute_env_vars(content, &env_vars).unwrap(); + + assert!(result.contains("ethereumoptimism/l2geth:v1.0.0")); + assert!(result.contains("something:default")); + assert!(result.contains("value: value")); + } + + #[test] + fn test_nested_env_substitution() { + let mut env_vars = HashMap::new(); + env_vars.insert("PORT".to_string(), "8545".to_string()); + env_vars.insert("HOST".to_string(), "localhost".to_string()); + + let content = r#" + services: + geth: + ports: + - "${PORT:-3000}:${PORT:-3000}" + environment: + - URL=http://${HOST:-127.0.0.1}:${PORT:-3000} + - SIMPLE=$HOST:$PORT + "#; + + let result = substitute_env_vars(content, &env_vars).unwrap(); + + assert!(result.contains("8545:8545")); + assert!(result.contains("http://localhost:8545")); + 
assert!(result.contains("SIMPLE=localhost:8545")); + } + + #[test] + fn test_env_file_parsing() { + let env_content = r#" + # Comment line + EMPTY= + QUOTED="quoted value" + UNQUOTED=unquoted value + WITH_SPACES= spaced value + "#; + + let temp_file = NamedTempFile::new().unwrap(); + fs::write(&temp_file, env_content).unwrap(); + + let vars = parse_env_file(env_content).unwrap(); + + assert_eq!(vars.get("EMPTY").unwrap(), ""); + assert_eq!(vars.get("QUOTED").unwrap(), "quoted value"); + assert_eq!(vars.get("UNQUOTED").unwrap(), "unquoted value"); + assert_eq!(vars.get("WITH_SPACES").unwrap(), "spaced value"); + } + + #[test] + fn test_complex_substitutions() { + let mut env_vars = HashMap::new(); + env_vars.insert("VERSION".to_string(), "1.0".to_string()); + env_vars.insert("MEMORY".to_string(), "1G".to_string()); + + let content = r#" + services: + app: + image: myapp:${VERSION:-latest} + deploy: + requirements: + limits: + memory: ${MEMORY:-512M} + cpus: ${CPUS:-1.0} + environment: + - CONFIG=${CONFIG_PATH:-/etc/config} + - COMBINED=${VERSION:-0.0.1}-${MEMORY:-256M} + "#; + + let result = substitute_env_vars(content, &env_vars).unwrap(); + + assert!(result.contains("myapp:1.0")); + assert!(result.contains("memory: 1G")); + assert!(result.contains("cpus: 1.0")); // Uses default + assert!(result.contains("CONFIG=/etc/config")); // Uses default + assert!(result.contains("COMBINED=1.0-1G")); + } + + #[test] + fn test_invalid_env_file() { + let env_content = r#" + VALID_KEY=value + INVALID+KEY=value + 123INVALID=value + _VALID=value + ALSO-INVALID=value + "#; + let vars = parse_env_file(env_content).unwrap(); + + assert!(vars.contains_key("VALID_KEY")); + assert!(vars.contains_key("_VALID")); + assert!(!vars.contains_key("INVALID+KEY")); + assert!(!vars.contains_key("123INVALID")); + assert!(!vars.contains_key("ALSO-INVALID")); + assert_eq!(vars.len(), 2); + } + + #[test] + fn test_empty_and_missing_variables() { + let mut env_vars = HashMap::new(); + 
env_vars.insert("EMPTY".to_string(), "".to_string()); + + let content = r#" + services: + app: + image: test:${EMPTY:-default} + command: ${MISSING} + environment: + - UNSET=${UNDEFINED:-} + - WITH_DEFAULT=${UNDEFINED:-default_value} + "#; + + let result = substitute_env_vars(content, &env_vars).unwrap(); + + assert!( + result.contains("test:default"), + "Empty var should use default" + ); + assert!(result.contains("command: "), "Missing var should be empty"); + assert!( + result.contains("UNSET="), + "Undefined with empty default should be empty" + ); + assert!( + result.contains("WITH_DEFAULT=default_value"), + "Undefined should use default value" + ); + } + + #[test] + fn test_empty_default_values() { + let env_vars = HashMap::new(); + let content = r#" + TEST1=${VAR:-} + TEST2=${OTHER_VAR:-default} + "#; + + let result = substitute_env_vars(content, &env_vars).unwrap(); + assert!(result.contains("TEST1=")); + assert!(result.contains("TEST2=default")); + } +} diff --git a/src/parser/mod.rs b/src/parser/mod.rs index 285dfa1..369f103 100644 --- a/src/parser/mod.rs +++ b/src/parser/mod.rs @@ -1,5 +1,6 @@ pub mod compose; pub mod docker_file; +pub(crate) mod env; pub use compose::*; pub use docker_file::*; diff --git a/src/tests/fixtures.rs b/src/test_fixtures.rs similarity index 77% rename from src/tests/fixtures.rs rename to src/test_fixtures.rs index f2a0ecd..e4bfe5b 100644 --- a/src/tests/fixtures.rs +++ b/src/test_fixtures.rs @@ -1,7 +1,5 @@ use std::path::PathBuf; -use super::fixtures_path; - pub fn get_tangle_dockerfile() -> PathBuf { fixtures_path().join("tangle-dockerfile") } @@ -13,3 +11,7 @@ pub fn get_local_reth_compose() -> PathBuf { pub fn get_reth_archive_compose() -> PathBuf { fixtures_path().join("reth-archive-docker-compose.yml") } + +fn fixtures_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures") +} diff --git a/src/tests/compose_health.rs b/src/tests/compose_health.rs deleted file mode 100644 index ab14d4e..0000000 
--- a/src/tests/compose_health.rs +++ /dev/null @@ -1,114 +0,0 @@ -use super::docker_file::is_docker_running; -use crate::{ - config::{ - compose::{ComposeConfig, Service}, - HealthCheck, - }, - with_docker_cleanup, DockerBuilder, -}; -use futures_util::TryStreamExt; -use std::{collections::HashMap, time::Duration}; - -with_docker_cleanup!(test_healthcheck, async |test_id: &str| { - if !is_docker_running() { - println!("Skipping test: Docker is not running"); - return; - } - - let builder = DockerBuilder::new().await.unwrap(); - let network_name = format!("test-network-{}", test_id); - - let mut network_labels = HashMap::new(); - network_labels.insert("test_id".to_string(), test_id.to_string()); - - // Create network with retry mechanism - builder - .create_network_with_retry( - &network_name, - 3, - Duration::from_secs(2), - Some(network_labels), - ) - .await - .unwrap(); - - let service_name = format!("healthy-service-{}", test_id); - - // Pull nginx image first - builder - .get_client() - .create_image( - Some(bollard::image::CreateImageOptions { - from_image: "nginx", - tag: "latest", - ..Default::default() - }), - None, - None, - ) - .try_collect::>() - .await - .unwrap(); - - // Create a service with healthcheck - let mut services = HashMap::new(); - let mut labels = HashMap::new(); - labels.insert("test_id".to_string(), test_id.to_string()); - - services.insert( - service_name.clone(), - Service { - image: Some("nginx:latest".to_string()), - healthcheck: Some(HealthCheck { - endpoint: "http://localhost/".to_string(), - method: "GET".to_string(), - expected_status: 200, - body: None, - interval: Duration::from_secs(1), - timeout: Duration::from_secs(3), - retries: 3, - }), - ports: Some(vec!["8080:80".to_string()]), - networks: Some(vec![network_name.clone()]), - labels: Some(labels), - ..Default::default() - }, - ); - - let mut config = ComposeConfig { - version: "3".to_string(), - services, - volumes: HashMap::new(), - }; - - let container_ids = 
builder.deploy_compose(&mut config).await.unwrap(); - let container_id = container_ids.get(&service_name).unwrap(); - - // Wait for container to be healthy - builder.wait_for_container(container_id).await.unwrap(); - - // Verify healthcheck configuration - let inspect = builder - .get_client() - .inspect_container(container_id, None) - .await - .unwrap(); - - if let Some(config) = inspect.config { - if let Some(healthcheck) = config.healthcheck { - assert_eq!( - healthcheck.test, - Some(vec![ - "CMD-SHELL".to_string(), - format!( - "curl -X GET {} -s -f -o /dev/null -w '%{{http_code}}' | grep -q {}", - "http://localhost/", "200" - ) - ]) - ); - assert_eq!(healthcheck.interval, Some(1_000_000_000)); - assert_eq!(healthcheck.timeout, Some(3_000_000_000)); - assert_eq!(healthcheck.retries, Some(3)); - } - } -}); diff --git a/src/tests/compose_requirements.rs b/src/tests/compose_requirements.rs deleted file mode 100644 index 0f96e34..0000000 --- a/src/tests/compose_requirements.rs +++ /dev/null @@ -1,119 +0,0 @@ -use super::docker_file::is_docker_running; -use crate::builder::compose::parse_memory_string; -use crate::config::SystemRequirements; -use crate::DockerError; -use crate::{ - config::compose::{ComposeConfig, Service}, - with_docker_cleanup, DockerBuilder, -}; -use std::collections::HashMap; -use std::time::Duration; - -with_docker_cleanup!(test_resource_limits, async |test_id: &str| { - if !is_docker_running() { - println!("Skipping test: Docker is not running"); - return; - } - - let builder = DockerBuilder::new().await.unwrap(); - let network_name = format!("test-network-{}", test_id); - - let mut network_labels = HashMap::new(); - network_labels.insert("test_id".to_string(), test_id.to_string()); - - // Create network with retry mechanism - builder - .create_network_with_retry( - &network_name, - 3, - Duration::from_secs(2), - Some(network_labels), - ) - .await - .unwrap(); - - let service_name = format!("test-service-{}", test_id); - - // Create a service 
with resource limits - let mut services = HashMap::new(); - let mut labels = HashMap::new(); - labels.insert("test_id".to_string(), test_id.to_string()); - - services.insert( - service_name.clone(), - Service { - image: Some("alpine:latest".to_string()), - command: Some(vec!["sleep".to_string(), "30".to_string()]), - requirements: Some(SystemRequirements { - min_memory_gb: 1, - min_disk_gb: 1, - min_bandwidth_mbps: 100, - required_ports: vec![], - data_directory: "/tmp".to_string(), - cpu_limit: Some(0.5), - memory_limit: Some("512M".to_string()), - memory_swap: Some("1G".to_string()), - memory_reservation: Some("256M".to_string()), - cpu_shares: Some(512), - cpuset_cpus: Some("0,1".to_string()), - }), - networks: Some(vec![network_name.clone()]), - labels: Some(labels), - ..Default::default() - }, - ); - - let mut config = ComposeConfig { - version: "3".to_string(), - services, - volumes: HashMap::new(), - }; - - let container_ids = builder.deploy_compose(&mut config).await.unwrap(); - let container_id = container_ids.get(&service_name).unwrap(); - - // Verify container configuration - let inspect = builder - .get_client() - .inspect_container(container_id, None) - .await - .unwrap(); - - if let Some(host_config) = inspect.host_config { - // Verify memory limits - assert_eq!( - host_config.memory, - Some(parse_memory_string("512M").unwrap() as i64) - ); - assert_eq!( - host_config.memory_swap, - Some(parse_memory_string("1G").unwrap() as i64) - ); - assert_eq!( - host_config.memory_reservation, - Some(parse_memory_string("256M").unwrap() as i64) - ); - - // Verify CPU limits - assert_eq!(host_config.nano_cpus, Some((0.5 * 1e9) as i64)); - assert_eq!(host_config.cpu_shares, Some(512)); - assert_eq!(host_config.cpuset_cpus, Some("0,1".to_string())); - } -}); - -#[tokio::test] -async fn test_invalid_resource_limits() { - let memory_tests = vec![ - ("1X", "Invalid memory unit: X"), - ("abc", "Invalid memory value: abc"), - ("12.5G", "Invalid memory value: 12.5G"), - 
]; - - for (input, expected_error) in memory_tests { - let result = parse_memory_string(input); - assert!(matches!( - result, - Err(DockerError::InvalidResourceLimit(msg)) if msg.contains(expected_error) - )); - } -} diff --git a/src/tests/integration/mod.rs b/src/tests/integration/mod.rs deleted file mode 100644 index 8c8a51a..0000000 --- a/src/tests/integration/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod optimism; diff --git a/src/tests/management.rs b/src/tests/management.rs deleted file mode 100644 index aa4a434..0000000 --- a/src/tests/management.rs +++ /dev/null @@ -1,244 +0,0 @@ -use crate::{with_docker_cleanup, DockerBuilder}; -use bollard::container::{ - Config, CreateContainerOptions, InspectContainerOptions, ListContainersOptions, -}; -use futures::TryStreamExt; -use std::{collections::HashMap, time::Duration}; -use uuid::Uuid; - -with_docker_cleanup!(test_network_management, async |test_id: &str| { - let builder = DockerBuilder::new().await.unwrap(); - let network_name = format!("test-network-{}", Uuid::new_v4()); - - let mut network_labels = HashMap::new(); - network_labels.insert("test_id".to_string(), test_id.to_string()); - - // Create network with retry - builder - .create_network_with_retry( - &network_name, - 5, - Duration::from_millis(100), - Some(network_labels), - ) - .await - .unwrap(); - - // Verify network exists - let networks = builder.list_networks().await.unwrap(); - assert!( - networks.contains(&network_name), - "Created network should be in the list" - ); -}); - -with_docker_cleanup!(test_volume_management, async |test_id: &str| { - let builder = DockerBuilder::new().await.unwrap(); - let volume_name = format!("test-volume-{}", test_id); - - // Create volume - builder.create_volume(&volume_name).await.unwrap(); - - // Verify volume exists - let volumes = builder.list_volumes().await.unwrap(); - assert!( - volumes.contains(&volume_name), - "Created volume should be in the list" - ); -}); - 
-with_docker_cleanup!(test_container_management, async |test_id: &str| { - let builder = DockerBuilder::new().await.unwrap(); - let unique_id = Uuid::new_v4(); - let container_name = format!("test-mgmt-{}-integration", unique_id); - println!("Starting test with container name: {}", container_name); - - // Pull image first to avoid potential "No such image" errors - println!("Pulling alpine image..."); - builder - .get_client() - .create_image( - Some(bollard::image::CreateImageOptions { - from_image: "alpine", - tag: "latest", - ..Default::default() - }), - None, - None, - ) - .try_collect::>() - .await - .unwrap(); - println!("Image pull complete"); - - // Create container first - let mut labels = HashMap::new(); - labels.insert("test_id", test_id); - - let container = builder - .get_client() - .create_container( - Some(CreateContainerOptions { - name: container_name.clone(), - platform: None, - }), - Config { - image: Some("alpine:latest"), - cmd: Some(vec!["sleep", "30"]), // Longer sleep to avoid timing issues - labels: Some(labels), - tty: Some(true), - ..Default::default() - }, - ) - .await - .unwrap(); - println!("Container created with ID: {}", container.id); - - // Add a small delay after creation to ensure Docker has fully registered the container - tokio::time::sleep(Duration::from_millis(100)).await; - - // Log container state after creation - if let Ok(inspect) = builder - .get_client() - .inspect_container(&container.id, None::) - .await - { - println!("Container state after creation: {:?}", inspect.state); - } else { - println!("Failed to inspect container after creation"); - } - - // Start container with retry logic - let mut start_retries = 3; - let mut start_success = false; - while start_retries > 0 && !start_success { - match builder - .get_client() - .start_container( - &container.id, - None::>, - ) - .await - { - Ok(_) => { - println!("Container started successfully"); - start_success = true; - } - Err(e) => { - println!( - "Failed to start 
container (attempt {}): {:?}", - 4 - start_retries, - e - ); - start_retries -= 1; - if start_retries > 0 { - tokio::time::sleep(Duration::from_millis(100)).await; - } - } - } - } - - assert!( - start_success, - "Failed to start container after multiple attempts" - ); - - // Add a small delay after starting to ensure Docker has fully started the container - tokio::time::sleep(Duration::from_millis(100)).await; - - // Log container state after start - if let Ok(inspect) = builder - .get_client() - .inspect_container(&container.id, None::) - .await - { - println!("Container state after start: {:?}", inspect.state); - } else { - println!("Failed to inspect container after start"); - } - - // Verify container is running with more retries - let mut filters = HashMap::new(); - filters.insert("id".to_string(), vec![container.id.as_str().to_string()]); - filters.insert("label".to_string(), vec![format!("test_id={}", test_id)]); - - let mut retries = 10; // Increased retries - let mut container_running = false; - while retries > 0 { - println!("Checking container running state, attempt {}", 11 - retries); - match builder - .get_client() - .list_containers(Some(ListContainersOptions { - all: true, // Check all containers, not just running ones - filters: filters.clone(), - ..Default::default() - })) - .await - { - Ok(containers) => { - if !containers.is_empty() { - container_running = true; - println!("Container confirmed running"); - break; - } - } - Err(e) => println!("Error checking container state: {:?}", e), - } - retries -= 1; - tokio::time::sleep(Duration::from_millis(200)).await; // Increased delay - } - - assert!(container_running, "Container should be running"); - - // Test exec in running container with retry - println!("Attempting to exec in container"); - let mut exec_retries = 3; - let mut exec_success = false; - while exec_retries > 0 && !exec_success { - match builder - .exec_in_container(&container.id, vec!["echo", "exec test"], None) - .await - { - 
Ok(output) => { - println!("Exec succeeded with output: {}", output); - assert!(output.contains("exec test")); - exec_success = true; - } - Err(e) => { - println!( - "Exec failed with error (attempt {}): {:?}", - 4 - exec_retries, - e - ); - exec_retries -= 1; - if exec_retries > 0 { - tokio::time::sleep(Duration::from_millis(100)).await; - } - } - } - } - - assert!( - exec_success, - "Failed to exec in container after multiple attempts" - ); - - // Try to get logs with retry - println!("Attempting to get container logs"); - let mut log_retries = 3; - while log_retries > 0 { - match builder.get_container_logs(&container.id).await { - Ok(logs) => { - println!("Successfully retrieved logs: {}", logs); - break; - } - Err(e) => { - println!("Failed to get logs (attempt {}): {:?}", 4 - log_retries, e); - log_retries -= 1; - if log_retries > 0 { - tokio::time::sleep(Duration::from_millis(100)).await; - } - } - } - } -}); diff --git a/src/tests/mod.rs b/src/tests/mod.rs deleted file mode 100644 index de8825b..0000000 --- a/src/tests/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Parser tests always available -pub mod compose; -pub mod docker_file; -pub mod fixtures; -pub mod utils; - -// Deployment tests only with deploy feature -mod compose_health; -mod compose_requirements; -mod integration; -mod management; - -use std::path::PathBuf; - -pub(crate) fn fixtures_path() -> PathBuf { - PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures") -} diff --git a/src/tests/utils.rs b/tests/common/mod.rs similarity index 89% rename from src/tests/utils.rs rename to tests/common/mod.rs index ce5cefe..8e93295 100644 --- a/src/tests/utils.rs +++ b/tests/common/mod.rs @@ -3,13 +3,25 @@ use bollard::network::ListNetworksOptions; use bollard::volume::ListVolumesOptions; use bollard::Docker; use std::collections::HashMap; +use std::future::Future; +use std::pin::Pin; +use std::process::Command; use std::sync::mpsc; use std::thread::{self, JoinHandle}; use uuid::Uuid; +#[allow(dead_code)] 
+pub fn is_docker_running() -> bool { + Command::new("docker") + .arg("info") + .output() + .map(|output| output.status.success()) + .unwrap_or(false) +} + pub struct DockerTestContext { - pub(crate) client: Docker, - pub(crate) test_id: String, + client: Docker, + test_id: String, } impl DockerTestContext { @@ -230,28 +242,24 @@ impl Drop for TestGuard { } } -/// Helper macro to create a test context and ensure cleanup -#[macro_export] -macro_rules! with_docker_cleanup { - ($test_name:ident, $test_body:expr) => { - #[tokio::test] - async fn $test_name() -> Result<(), Box<dyn std::error::Error>> { - let guard = $crate::tests::utils::TestGuard::new(); - let test_id = guard.get_test_id().to_string(); +/// Helper to create a test context and ensure cleanup +pub async fn with_docker_cleanup<F>(mut test_body: F) -> color_eyre::Result<()> +where + F: FnMut(String) -> Pin<Box<dyn Future<Output = color_eyre::Result<()>> + Send + 'static>>, +{ + let guard = TestGuard::new(); + let test_id = guard.get_test_id().to_string(); - // Clean up any leftover resources using the same test_id - let client = bollard::Docker::connect_with_local_defaults().unwrap(); - let ctx = $crate::tests::utils::DockerTestContext { - client, - test_id: test_id.clone(), - }; - ctx.cleanup().await; + // Clean up any leftover resources using the same test_id + let client = Docker::connect_with_local_defaults()?; + let ctx = DockerTestContext { + client, + test_id: test_id.clone(), + }; + ctx.cleanup().await; - // Run the test with the guard's test_id - let test_fn = $test_body; - test_fn(test_id.as_str()).await; + // Run the test with the guard's test_id + test_body(test_id).await?; - Ok(()) - } - }; + Ok(()) } diff --git a/tests/compose.rs b/tests/compose.rs new file mode 100644 index 0000000..3d7125d --- /dev/null +++ b/tests/compose.rs @@ -0,0 +1,150 @@ +mod common; + +use bollard::container::ListContainersOptions; +use color_eyre::Result; +use common::{is_docker_running, with_docker_cleanup}; +use dockworker::{BuildConfig, ComposeConfig, DockerBuilder, Service}; +use 
std::{collections::HashMap, time::Duration}; + +#[tokio::test] +async fn test_compose_deployment() -> Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + if !is_docker_running() { + println!("Skipping test: Docker is not running"); + return Ok(()); + } + + let builder = DockerBuilder::new().await?; + let network_name = format!("test-network-{}", test_id); + + let mut labels = HashMap::new(); + labels.insert("test_id".to_string(), test_id.to_string()); + + // Create network with retry mechanism + builder + .create_network_with_retry(&network_name, 3, Duration::from_secs(2), Some(labels)) + .await?; + + // Create a simple test compose config + let mut services = HashMap::new(); + let mut env = HashMap::new(); + env.insert("TEST".to_string(), "value".to_string()); + + let mut labels = HashMap::new(); + labels.insert("test_id".to_string(), test_id.to_string()); + + let service_name = format!("test-service-{}", test_id); + services.insert( + service_name, + Service { + image: Some("alpine:latest".to_string()), + ports: Some(vec!["8080:80".to_string()]), + environment: Some(env.into()), + volumes: None, + networks: Some(vec![network_name.clone()]), + labels: Some(labels), + ..Service::default() + }, + ); + + let mut config = ComposeConfig { + version: "3".to_string(), + services, + volumes: HashMap::new(), + }; + + let container_ids = builder.deploy_compose(&mut config).await?; + assert_eq!(container_ids.len(), 1); + + // Add a small delay to ensure Docker has time to start the container + tokio::time::sleep(Duration::from_millis(100)).await; + + // Verify containers are running + for (_, container_id) in container_ids { + let mut filters = HashMap::new(); + filters.insert("id".to_string(), vec![container_id.clone()]); + filters.insert("label".to_string(), vec![format!("test_id={}", test_id)]); + + let mut retries = 5; + let mut containers_found = false; + while retries > 0 { + match builder + .get_client() + 
.list_containers(Some(ListContainersOptions { + all: true, + filters: filters.clone(), + ..Default::default() + })) + .await + { + Ok(containers) => { + if containers.len() == 1 + && containers[0].id.as_ref().unwrap() == &container_id + { + containers_found = true; + break; + } + } + Err(e) => println!("Error listing containers: {:?}", e), + } + retries -= 1; + tokio::time::sleep(Duration::from_millis(100)).await; + } + + assert!(containers_found, "Container not found or not running"); + } + + Ok(()) + }) + }) + .await +} + +#[tokio::test] +async fn test_compose_with_build() -> Result<()> { + with_docker_cleanup(|_test_id| { + Box::pin(async move { + let builder = DockerBuilder::new().await.unwrap(); + + // Create a compose config with build context + let mut services = HashMap::new(); + services.insert( + "test-build-service".to_string(), + Service { + image: None, + build: Some(BuildConfig { + context: "./".to_string(), + dockerfile: Some("Dockerfile".to_string()), + }), + ports: None, + environment: None, + volumes: None, + networks: None, + requirements: None, + depends_on: None, + healthcheck: None, + restart: None, + command: None, + user: None, + labels: None, + platform: None, + env_file: None, + }, + ); + + let mut config = ComposeConfig { + version: "3".to_string(), + services, + volumes: HashMap::new(), + }; + + let result = builder.deploy_compose(&mut config).await; + // This should fail because we don't have a Dockerfile in the current directory + assert!(result.is_err()); + + Ok(()) + }) + }) + .await +} diff --git a/tests/containers.rs b/tests/containers.rs new file mode 100644 index 0000000..700dd48 --- /dev/null +++ b/tests/containers.rs @@ -0,0 +1,214 @@ +mod common; + +use bollard::container::{ + Config, CreateContainerOptions, InspectContainerOptions, ListContainersOptions, +}; +use color_eyre::Result; +use common::with_docker_cleanup; +use dockworker::DockerBuilder; +use futures_util::TryStreamExt; +use std::collections::HashMap; +use 
std::time::Duration; +use uuid::Uuid; + +#[tokio::test] +async fn test_container_management() -> Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + let builder = DockerBuilder::new().await?; + let unique_id = Uuid::new_v4(); + let container_name = format!("test-mgmt-{}-integration", unique_id); + println!("Starting test with container name: {}", container_name); + + // Pull image first to avoid potential "No such image" errors + println!("Pulling alpine image..."); + builder + .get_client() + .create_image( + Some(bollard::image::CreateImageOptions { + from_image: "alpine", + tag: "latest", + ..Default::default() + }), + None, + None, + ) + .try_collect::>() + .await?; + println!("Image pull complete"); + + // Create container first + let mut labels = HashMap::new(); + labels.insert("test_id", &*test_id); + + let container = builder + .get_client() + .create_container( + Some(CreateContainerOptions { + name: container_name.clone(), + platform: None, + }), + Config { + image: Some("alpine:latest"), + cmd: Some(vec!["sleep", "30"]), // Longer sleep to avoid timing issues + labels: Some(labels), + tty: Some(true), + ..Default::default() + }, + ) + .await?; + println!("Container created with ID: {}", container.id); + + // Add a small delay after creation to ensure Docker has fully registered the container + tokio::time::sleep(Duration::from_millis(100)).await; + + // Log container state after creation + if let Ok(inspect) = builder + .get_client() + .inspect_container(&container.id, None::) + .await + { + println!("Container state after creation: {:?}", inspect.state); + } else { + println!("Failed to inspect container after creation"); + } + + // Start container with retry logic + let mut start_retries = 3; + let mut start_success = false; + while start_retries > 0 && !start_success { + match builder + .get_client() + .start_container( + &container.id, + None::>, + ) + .await + { + Ok(_) => { + println!("Container started successfully"); + 
start_success = true; + } + Err(e) => { + println!( + "Failed to start container (attempt {}): {:?}", + 4 - start_retries, + e + ); + start_retries -= 1; + if start_retries > 0 { + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + } + } + + assert!( + start_success, + "Failed to start container after multiple attempts" + ); + + // Add a small delay after starting to ensure Docker has fully started the container + tokio::time::sleep(Duration::from_millis(100)).await; + + // Log container state after start + if let Ok(inspect) = builder + .get_client() + .inspect_container(&container.id, None::) + .await + { + println!("Container state after start: {:?}", inspect.state); + } else { + println!("Failed to inspect container after start"); + } + + // Verify container is running with more retries + let mut filters = HashMap::new(); + filters.insert("id".to_string(), vec![container.id.as_str().to_string()]); + filters.insert("label".to_string(), vec![format!("test_id={}", test_id)]); + + let mut retries = 10; // Increased retries + let mut container_running = false; + while retries > 0 { + println!("Checking container running state, attempt {}", 11 - retries); + match builder + .get_client() + .list_containers(Some(ListContainersOptions { + all: true, // Check all containers, not just running ones + filters: filters.clone(), + ..Default::default() + })) + .await + { + Ok(containers) => { + if !containers.is_empty() { + container_running = true; + println!("Container confirmed running"); + break; + } + } + Err(e) => println!("Error checking container state: {:?}", e), + } + retries -= 1; + tokio::time::sleep(Duration::from_millis(200)).await; // Increased delay + } + + assert!(container_running, "Container should be running"); + + // Test exec in running container with retry + println!("Attempting to exec in container"); + let mut exec_retries = 3; + let mut exec_success = false; + while exec_retries > 0 && !exec_success { + match builder + 
.exec_in_container(&container.id, vec!["echo", "exec test"], None) + .await + { + Ok(output) => { + println!("Exec succeeded with output: {}", output); + assert!(output.contains("exec test")); + exec_success = true; + } + Err(e) => { + println!( + "Exec failed with error (attempt {}): {:?}", + 4 - exec_retries, + e + ); + exec_retries -= 1; + if exec_retries > 0 { + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + } + } + + assert!( + exec_success, + "Failed to exec in container after multiple attempts" + ); + + // Try to get logs with retry + println!("Attempting to get container logs"); + let mut log_retries = 3; + while log_retries > 0 { + match builder.get_container_logs(&container.id).await { + Ok(logs) => { + println!("Successfully retrieved logs: {}", logs); + break; + } + Err(e) => { + println!("Failed to get logs (attempt {}): {:?}", 4 - log_retries, e); + log_retries -= 1; + if log_retries > 0 { + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + } + } + + Ok(()) + }) + }) + .await +} diff --git a/tests/dockerfile.rs b/tests/dockerfile.rs new file mode 100644 index 0000000..581e7e0 --- /dev/null +++ b/tests/dockerfile.rs @@ -0,0 +1,137 @@ +mod common; + +use bollard::container::ListContainersOptions; +use common::{is_docker_running, with_docker_cleanup}; +use dockworker::config::{DockerCommand, DockerfileConfig}; +use dockworker::DockerBuilder; +use futures_util::TryStreamExt; +use std::collections::HashMap; +use std::time::Duration; + +#[tokio::test] +async fn test_dockerfile_deployment() -> color_eyre::Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + if !is_docker_running() { + println!("Skipping test: Docker is not running"); + return Ok(()); + } + + let builder = DockerBuilder::new().await?; + let network_name = format!("test-network-{}", test_id); + + let mut network_labels = HashMap::new(); + network_labels.insert("test_id".to_string(), test_id.to_string()); + + // Create network with retry 
mechanism + builder + .create_network_with_retry( + &network_name, + 3, + Duration::from_secs(2), + Some(network_labels), + ) + .await?; + + // Pull alpine image first + println!("Pulling alpine image..."); + builder + .get_client() + .create_image( + Some(bollard::image::CreateImageOptions { + from_image: "alpine", + tag: "latest", + ..Default::default() + }), + None, + None, + ) + .try_collect::>() + .await?; + println!("Image pull complete"); + + // Create a simple test Dockerfile config + let config = DockerfileConfig { + base_image: "alpine:latest".to_string(), + commands: vec![ + DockerCommand::Run { + command: "echo 'test' > /test.txt".to_string(), + }, + DockerCommand::Label { + labels: { + let mut labels = HashMap::new(); + labels.insert("test_id".to_string(), test_id.to_string()); + labels + }, + }, + DockerCommand::Cmd { + command: vec!["sleep".to_string(), "30".to_string()], // Keep container running + }, + ], + }; + + let tag = format!("test-dockerfile-{}", test_id); + println!("Building image with tag: {}", tag); + + // Deploy using our config with network + let container_id = builder + .deploy_dockerfile(&config, &tag, None, None, Some(network_name.clone()), None) + .await?; + println!("Container created with ID: {}", container_id); + + // Add a small delay after creation + tokio::time::sleep(Duration::from_millis(100)).await; + + // Verify container is running + let mut filters = std::collections::HashMap::new(); + filters.insert("id".to_string(), vec![container_id.clone()]); + filters.insert("label".to_string(), vec![format!("test_id={}", test_id)]); + + let mut retries = 5; + let mut container_running = false; + while retries > 0 { + println!("Checking container state, attempt {}", 6 - retries); + if let Ok(containers) = builder + .get_client() + .list_containers(Some(ListContainersOptions { + all: true, + filters: filters.clone(), + ..Default::default() + })) + .await + { + if !containers.is_empty() { + println!("Container found and running"); + 
container_running = true; + break; + } else { + println!("No containers found matching filters"); + } + } else { + println!("Error listing containers"); + } + retries -= 1; + tokio::time::sleep(Duration::from_millis(500)).await; + } + + // If container not running, get more details + if !container_running { + println!("Container not found with filters. Checking container inspect..."); + if let Ok(inspect) = builder + .get_client() + .inspect_container(&container_id, None) + .await + { + println!("Container inspect result: {:?}", inspect); + } else { + println!("Failed to inspect container"); + } + } + + assert!(container_running, "Container should be running"); + + Ok(()) + }) + }) + .await +} diff --git a/tests/healthcheck.rs b/tests/healthcheck.rs new file mode 100644 index 0000000..826bf49 --- /dev/null +++ b/tests/healthcheck.rs @@ -0,0 +1,117 @@ +mod common; + +use color_eyre::Result; +use common::{is_docker_running, with_docker_cleanup}; +use dockworker::config::{HealthCheck, Method}; +use dockworker::{ComposeConfig, DockerBuilder, Service}; +use futures_util::TryStreamExt; +use std::{collections::HashMap, time::Duration}; + +#[tokio::test] +async fn test_healthcheck() -> Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + if !is_docker_running() { + println!("Skipping test: Docker is not running"); + return Ok(()); + } + + let builder = DockerBuilder::new().await.unwrap(); + let network_name = format!("test-network-{}", test_id); + + let mut network_labels = HashMap::new(); + network_labels.insert("test_id".to_string(), test_id.to_string()); + + // Create network with retry mechanism + builder + .create_network_with_retry( + &network_name, + 3, + Duration::from_secs(2), + Some(network_labels), + ) + .await?; + + let service_name = format!("healthy-service-{}", test_id); + + // Pull nginx image first + builder + .get_client() + .create_image( + Some(bollard::image::CreateImageOptions { + from_image: "nginx", + tag: "latest", + 
..Default::default() + }), + None, + None, + ) + .try_collect::>() + .await?; + + // Create a service with healthcheck + let mut services = HashMap::new(); + let mut labels = HashMap::new(); + labels.insert("test_id".to_string(), test_id.to_string()); + + services.insert( + service_name.clone(), + Service { + image: Some("nginx:latest".to_string()), + healthcheck: Some(HealthCheck { + endpoint: "http://localhost/".to_string(), + method: Method::Get, + expected_status: 200, + body: None, + interval: Duration::from_secs(1), + timeout: Duration::from_secs(3), + retries: 3, + }), + ports: Some(vec!["8080:80".to_string()]), + networks: Some(vec![network_name.clone()]), + labels: Some(labels), + ..Default::default() + }, + ); + + let mut config = ComposeConfig { + version: "3".to_string(), + services, + volumes: HashMap::new(), + }; + + let container_ids = builder.deploy_compose(&mut config).await?; + let container_id = container_ids.get(&service_name).unwrap(); + + // Wait for container to be healthy + builder.wait_for_container(container_id).await?; + + // Verify healthcheck configuration + let inspect = builder + .get_client() + .inspect_container(container_id, None) + .await?; + + if let Some(config) = inspect.config { + if let Some(healthcheck) = config.healthcheck { + assert_eq!( + healthcheck.test, + Some(vec![ + "CMD-SHELL".to_string(), + format!( + "curl -X GET {} -s -f -o /dev/null -w '%{{http_code}}' | grep -q {}", + "http://localhost/", "200" + ) + ]) + ); + assert_eq!(healthcheck.interval, Some(1_000_000_000)); + assert_eq!(healthcheck.timeout, Some(3_000_000_000)); + assert_eq!(healthcheck.retries, Some(3)); + } + } + + Ok(()) + }) + }) + .await +} diff --git a/tests/net.rs b/tests/net.rs new file mode 100644 index 0000000..0daadf6 --- /dev/null +++ b/tests/net.rs @@ -0,0 +1,40 @@ +mod common; + +use common::with_docker_cleanup; +use dockworker::DockerBuilder; +use std::collections::HashMap; +use std::time::Duration; +use uuid::Uuid; + +#[tokio::test] 
+async fn test_network_management() -> color_eyre::Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + let builder = DockerBuilder::new().await?; + let network_name = format!("test-network-{}", Uuid::new_v4()); + + let mut network_labels = HashMap::new(); + network_labels.insert("test_id".to_string(), test_id.to_string()); + + // Create network with retry + builder + .create_network_with_retry( + &network_name, + 5, + Duration::from_millis(100), + Some(network_labels), + ) + .await?; + + // Verify network exists + let networks = builder.list_networks().await?; + assert!( + networks.contains(&network_name), + "Created network should be in the list" + ); + + Ok(()) + }) + }) + .await +} diff --git a/src/tests/integration/optimism.rs b/tests/optimism.rs similarity index 68% rename from src/tests/integration/optimism.rs rename to tests/optimism.rs index 4fe6963..c511d86 100644 --- a/src/tests/integration/optimism.rs +++ b/tests/optimism.rs @@ -1,14 +1,17 @@ -use crate::config::compose::ComposeConfig; -use crate::parser::compose::ComposeParser; -use crate::DockerBuilder; +mod common; + +use color_eyre::Result; +use common::with_docker_cleanup; +use dockworker::parser::ComposeParser; +use dockworker::{ComposeConfig, DockerBuilder, DockerError}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::time::Duration; use tokio::fs; use uuid::Uuid; -const OPTIMISM_FIXTURES: &str = "fixtures/simple-optimism-node"; -const TEST_BASE_DIR: &str = "test-data"; +const OPTIMISM_FIXTURES: &str = "./fixtures/simple-optimism-node"; +const TEST_BASE_DIR: &str = "./test-data"; pub struct OptimismTestContext { pub builder: DockerBuilder, @@ -17,7 +20,7 @@ pub struct OptimismTestContext { } impl OptimismTestContext { - pub async fn new(test_id: &str) -> Result { + pub async fn new(test_id: &str) -> Result { let builder = DockerBuilder::new().await?; // Create test directory under the common test base @@ -27,10 +30,12 @@ impl OptimismTestContext { // 
Set up initial config let compose_path = PathBuf::from(OPTIMISM_FIXTURES).join("docker-compose.yml"); - let env_path = PathBuf::from("src/tests/integration/optimism_env.env"); + let env_path = PathBuf::from("./tests/optimism_env.env"); // Parse config with environment variables - let mut config = ComposeParser::from_file_with_env(&compose_path, &env_path).await?; + let mut config = ComposeParser::new() + .env_file(env_path) + .parse_from_path(&compose_path)?; // Add test_id label to each service for service in config.services.values_mut() { @@ -56,7 +61,7 @@ impl OptimismTestContext { }) } - pub async fn deploy(&mut self) -> Result, crate::error::DockerError> { + pub async fn deploy(&mut self) -> Result> { // First ensure all directories exist self.setup_directories().await?; @@ -65,10 +70,11 @@ impl OptimismTestContext { .test_dir .join("docker/dockerfiles/Dockerfile.bedrock-init"); if !dockerfile_path.exists() { - return Err(crate::error::DockerError::ValidationError(format!( + return Err(DockerError::ValidationError(format!( "Dockerfile not found at expected path: {}", dockerfile_path.display() - ))); + )) + .into()); } println!("Found Dockerfile at: {}", dockerfile_path.display()); @@ -76,18 +82,16 @@ impl OptimismTestContext { self.builder .deploy_compose_with_base_dir(&mut self.config, self.test_dir.clone()) .await + .map_err(Into::into) } - pub async fn setup_directories(&self) -> Result<(), crate::error::DockerError> { + pub async fn setup_directories(&self) -> Result<()> { println!("\n=== Setting up test directories ==="); println!("Test directory: {}", self.test_dir.display()); // Create test directory fs::create_dir_all(&self.test_dir).await.map_err(|e| { - crate::error::DockerError::ValidationError(format!( - "Failed to create test directory: {}", - e - )) + DockerError::ValidationError(format!("Failed to create test directory: {}", e)) })?; // Define directories to copy from fixtures @@ -114,7 +118,7 @@ impl OptimismTestContext { // Create parent 
directory if let Some(parent) = dst_dir.parent() { fs::create_dir_all(parent).await.map_err(|e| { - crate::error::DockerError::ValidationError(format!( + DockerError::ValidationError(format!( "Failed to create parent directory {}: {}", parent.display(), e @@ -131,7 +135,7 @@ impl OptimismTestContext { .status() .await .map_err(|e| { - crate::error::DockerError::ValidationError(format!( + DockerError::ValidationError(format!( "Failed to copy directory {}: {}", dir, e )) @@ -142,7 +146,7 @@ impl OptimismTestContext { src_dir.display() ); fs::create_dir_all(&dst_dir).await.map_err(|e| { - crate::error::DockerError::ValidationError(format!( + DockerError::ValidationError(format!( "Failed to create directory {}: {}", dst_dir.display(), e @@ -159,7 +163,7 @@ impl OptimismTestContext { let dir_path = self.test_dir.join(dir); println!("Creating directory: {}", dir_path.display()); fs::create_dir_all(&dir_path).await.map_err(|e| { - crate::error::DockerError::ValidationError(format!( + DockerError::ValidationError(format!( "Failed to create directory {}: {}", dir_path.display(), e @@ -175,33 +179,24 @@ impl OptimismTestContext { println!(" From: {}", compose_src.display()); println!(" To: {}", compose_dst.display()); fs::copy(&compose_src, &compose_dst).await.map_err(|e| { - crate::error::DockerError::ValidationError(format!( - "Failed to copy docker-compose.yml: {}", - e - )) + DockerError::ValidationError(format!("Failed to copy docker-compose.yml: {}", e)) })?; // Copy optimism_env.env to .env - let env_src = PathBuf::from("src/tests/integration/optimism_env.env"); + let env_src = PathBuf::from("tests/optimism_env.env"); let env_dst = self.test_dir.join(".env"); println!("Copying env file:"); println!(" From: {}", env_src.display()); println!(" To: {}", env_dst.display()); fs::copy(&env_src, &env_dst).await.map_err(|e| { - crate::error::DockerError::ValidationError(format!( - "Failed to copy optimism_env.env to .env: {}", - e - )) + 
DockerError::ValidationError(format!("Failed to copy optimism_env.env to .env: {}", e)) })?; // Set up Docker build context println!("\n=== Setting up Docker build context ==="); let dockerfiles_dir = self.test_dir.join("docker/dockerfiles"); fs::create_dir_all(&dockerfiles_dir).await.map_err(|e| { - crate::error::DockerError::ValidationError(format!( - "Failed to create dockerfiles directory: {}", - e - )) + DockerError::ValidationError(format!("Failed to create dockerfiles directory: {}", e)) })?; // Copy Dockerfile with explicit fs::copy @@ -216,7 +211,7 @@ impl OptimismTestContext { fs::copy(&dockerfile_src, &dockerfile_dst) .await .map_err(|e| { - crate::error::DockerError::ValidationError(format!( + DockerError::ValidationError(format!( "Failed to copy Dockerfile.bedrock-init: {}", e )) @@ -246,55 +241,57 @@ impl OptimismTestContext { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::with_docker_cleanup; - - with_docker_cleanup!(test_optimism_node_deployment, async |test_id: &str| { - let mut ctx = OptimismTestContext::new(test_id).await.unwrap(); - - // Deploy all services and verify they're running - let container_ids = ctx.deploy().await.unwrap(); - - // Verify services are running - for (service_name, container_id) in container_ids { - println!( - "Verifying container for service {} with id {}", - service_name, container_id - ); - - // Verify container is running - let mut retries = 5; - let mut container_running = false; - while retries > 0 { - if let Ok(containers) = ctx - .builder - .get_client() - .list_containers(Some(bollard::container::ListContainersOptions { - all: true, - filters: { - let mut filters = HashMap::new(); - filters.insert("id".to_string(), vec![container_id.clone()]); - filters - }, - ..Default::default() - })) - .await - { - if !containers.is_empty() { - container_running = true; - break; +#[tokio::test] +async fn test_optimism_node_deployment() -> Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + 
let mut ctx = OptimismTestContext::new(&test_id).await?; + + // Deploy all services and verify they're running + let container_ids = ctx.deploy().await?; + + // Verify services are running + for (service_name, container_id) in container_ids { + println!( + "Verifying container for service {} with id {}", + service_name, container_id + ); + + // Verify container is running + let mut retries = 5; + let mut container_running = false; + while retries > 0 { + if let Ok(containers) = ctx + .builder + .get_client() + .list_containers(Some(bollard::container::ListContainersOptions { + all: true, + filters: { + let mut filters = HashMap::new(); + filters.insert("id".to_string(), vec![container_id.clone()]); + filters + }, + ..Default::default() + })) + .await + { + if !containers.is_empty() { + container_running = true; + break; + } } + retries -= 1; + tokio::time::sleep(Duration::from_millis(500)).await; } - retries -= 1; - tokio::time::sleep(Duration::from_millis(500)).await; + assert!( + container_running, + "Container for service {} should be running", + service_name + ); } - assert!( - container_running, - "Container for service {} should be running", - service_name - ); - } - }); + + Ok(()) + }) + }) + .await } diff --git a/src/tests/integration/optimism_env.env b/tests/optimism_env.env similarity index 100% rename from src/tests/integration/optimism_env.env rename to tests/optimism_env.env diff --git a/tests/resource_limits.rs b/tests/resource_limits.rs new file mode 100644 index 0000000..10ce4d1 --- /dev/null +++ b/tests/resource_limits.rs @@ -0,0 +1,106 @@ +mod common; + +use color_eyre::Result; +use common::{is_docker_running, with_docker_cleanup}; +use dockworker::config::{parse_memory_string, SystemRequirements}; +use dockworker::{ComposeConfig, DockerBuilder, Service}; +use std::collections::HashMap; +use std::time::Duration; + +#[tokio::test] +async fn test_resource_limits() -> Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + if 
!is_docker_running() { + println!("Skipping test: Docker is not running"); + return Ok(()); + } + + let builder = DockerBuilder::new().await?; + let network_name = format!("test-network-{}", test_id); + + let mut network_labels = HashMap::new(); + network_labels.insert("test_id".to_string(), test_id.to_string()); + + // Create network with retry mechanism + builder + .create_network_with_retry( + &network_name, + 3, + Duration::from_secs(2), + Some(network_labels), + ) + .await?; + + let service_name = format!("test-service-{}", test_id); + + // Create a service with resource limits + let mut services = HashMap::new(); + let mut labels = HashMap::new(); + labels.insert("test_id".to_string(), test_id.to_string()); + + services.insert( + service_name.clone(), + Service { + image: Some("alpine:latest".to_string()), + command: Some(vec!["sleep".to_string(), "30".to_string()]), + requirements: Some(SystemRequirements { + min_memory_gb: 1, + min_disk_gb: 1, + min_bandwidth_mbps: 100, + required_ports: vec![], + data_directory: "/tmp".to_string(), + cpu_limit: Some(0.5), + memory_limit: Some("512M".to_string()), + memory_swap: Some("1G".to_string()), + memory_reservation: Some("256M".to_string()), + cpu_shares: Some(512), + cpuset_cpus: Some("0,1".to_string()), + }), + networks: Some(vec![network_name.clone()]), + labels: Some(labels), + ..Default::default() + }, + ); + + let mut config = ComposeConfig { + version: "3".to_string(), + services, + volumes: HashMap::new(), + }; + + let container_ids = builder.deploy_compose(&mut config).await?; + let container_id = container_ids.get(&service_name).unwrap(); + + // Verify container configuration + let inspect = builder + .get_client() + .inspect_container(container_id, None) + .await?; + + if let Some(host_config) = inspect.host_config { + // Verify memory limits + assert_eq!( + host_config.memory, + Some(parse_memory_string("512M").unwrap()) + ); + assert_eq!( + host_config.memory_swap, + 
Some(parse_memory_string("1G").unwrap()) + ); + assert_eq!( + host_config.memory_reservation, + Some(parse_memory_string("256M").unwrap()) + ); + + // Verify CPU limits + assert_eq!(host_config.nano_cpus, Some((0.5 * 1e9) as i64)); + assert_eq!(host_config.cpu_shares, Some(512)); + assert_eq!(host_config.cpuset_cpus, Some("0,1".to_string())); + } + + Ok(()) + }) + }) + .await +} diff --git a/tests/volumes.rs b/tests/volumes.rs new file mode 100644 index 0000000..ce743aa --- /dev/null +++ b/tests/volumes.rs @@ -0,0 +1,27 @@ +mod common; + +use common::with_docker_cleanup; +use dockworker::DockerBuilder; + +#[tokio::test] +async fn test_volume_management() -> color_eyre::Result<()> { + with_docker_cleanup(|test_id| { + Box::pin(async move { + let builder = DockerBuilder::new().await?; + let volume_name = format!("test-volume-{}", test_id); + + // Create volume + builder.create_volume(&volume_name).await?; + + // Verify volume exists + let volumes = builder.list_volumes().await?; + assert!( + volumes.contains(&volume_name), + "Created volume should be in the list" + ); + + Ok(()) + }) + }) + .await +}