diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index e18d090..c80bb3f 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -17,6 +17,8 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           toolchain: stable
+      - name: Pull test images # TODO: pull image in test itself
+        run: docker pull alpine:latest
       - name: Run tests
         run: cargo test --lib
diff --git a/crates/bbuilder/bin/main.rs b/crates/bbuilder/bin/main.rs
index a3b2390..8915167 100644
--- a/crates/bbuilder/bin/main.rs
+++ b/crates/bbuilder/bin/main.rs
@@ -37,7 +37,11 @@ async fn main() -> eyre::Result<()> {
     let cli = Cli::parse();
 
     match cli.command {
-        Commands::Run { filename, name, dry_run } => run_command(filename, name, cli.config_folder, dry_run).await?,
+        Commands::Run {
+            filename,
+            name,
+            dry_run,
+        } => run_command(filename, name, cli.config_folder, dry_run).await?,
     }
 
     Ok(())
diff --git a/crates/runtime-docker-compose/src/deployment_watcher.rs b/crates/runtime-docker-compose/src/deployment_watcher.rs
new file mode 100644
index 0000000..fd2005f
--- /dev/null
+++ b/crates/runtime-docker-compose/src/deployment_watcher.rs
@@ -0,0 +1,318 @@
+use bollard::Docker;
+use bollard::query_parameters::EventsOptionsBuilder;
+use futures_util::stream::StreamExt;
+use std::collections::HashMap;
+use tokio::sync::mpsc;
+
+use crate::compose::DockerComposeSpec;
+
+/// Single deployment watcher
+pub struct DeploymentState {
+    pub services: Vec<ServiceStatus>,
+}
+
+pub struct ServiceStatus {
+    pub name: String,
+    pub container_ip: Option<String>,
+    pub state: ContainerState,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum ContainerState {
+    Pending,
+    PullingImage,
+    Running,
+    Healthy,
+    Unhealthy,
+    Completed,
+    Failed(String),
+}
+
+#[derive(Debug, Default, Clone)]
+pub enum ContainerEvent {
+    #[default]
+    Pending,
+    PullingImage,
+    Started {
+        container_ip: String,
+    },
+    Died {
+        exit_code: i64,
+    },
+}
+
+#[derive(Debug)]
+pub struct DockerEventMessage {
+    pub container_name: String,
+    pub event: ContainerEvent,
+}
+
+impl DeploymentState {
+    pub fn new(spec: &DockerComposeSpec) -> Self {
+        let mut services = Vec::new();
+
+        for (service_name, _) in &spec.services {
+            services.push(ServiceStatus {
+                name: service_name.clone(),
+                container_ip: None,
+                state: ContainerState::Pending,
+            });
+        }
+
+        Self { services }
+    }
+
+    pub fn handle_container_event(&mut self, container_name: String, event: ContainerEvent) {
+        let service = self.services.iter_mut().find(|s| s.name == container_name);
+
+        if let Some(service) = service {
+            match event {
+                ContainerEvent::Pending => {}
+                ContainerEvent::PullingImage => {
+                    service.state = ContainerState::PullingImage;
+                }
+                ContainerEvent::Started { container_ip } => {
+                    service.state = ContainerState::Running;
+                    service.container_ip = Some(container_ip.clone());
+                }
+                ContainerEvent::Died { exit_code } => {
+                    service.state =
+                        ContainerState::Failed(format!("Died with exit code: {}", exit_code));
+                }
+            }
+        }
+    }
+}
+
+/// Listen to Docker events and send them through the channel
+pub async fn listen_docker_events(
+    tx: mpsc::UnboundedSender<DockerEventMessage>,
+    filters: HashMap<&str, Vec<&str>>,
+) {
+    let docker = Docker::connect_with_local_defaults().unwrap();
+    let options = EventsOptionsBuilder::new().filters(&filters).build();
+
+    let mut events = docker.events(Some(options));
+    tracing::debug!("Listening for container events...");
+
+    while let Some(event_result) = events.next().await {
+        match event_result {
+            Ok(event) => {
+                tracing::debug!("Event: {:?}", event.action);
+                if let Some(actor) = event.actor {
+                    let container_id = actor.id.clone();
+                    tracing::debug!(" Container ID: {:?}", container_id);
+                    if let Some(attrs) = actor.attributes {
+                        if let Some(name) = attrs.get("name") {
+                            tracing::debug!(" Container Name: {}", name);
+
+                            // Map Docker event to ContainerEvent
+                            let container_event = match event.action.as_deref() {
+                                Some("start") => {
+                                    // Extract container IP by inspecting the container
+                                    let container_ip = if let Some(id) = container_id {
+                                        extract_container_ip(&docker, &id).await
+                                    } else {
+                                        String::new()
+                                    };
+
+                                    Some(ContainerEvent::Started { container_ip })
+                                }
+                                Some("die") => {
+                                    let exit_code = attrs
+                                        .get("exitCode")
+                                        .and_then(|s| s.parse::<i64>().ok())
+                                        .unwrap_or(1);
+                                    Some(ContainerEvent::Died { exit_code })
+                                }
+                                _ => None,
+                            };
+
+                            if let Some(event) = container_event {
+                                let _ = tx.send(DockerEventMessage {
+                                    container_name: name.clone(),
+                                    event,
+                                });
+                            }
+                        }
+                    }
+                }
+            }
+            Err(e) => tracing::error!("Error: {}", e),
+        }
+    }
+}
+
+/// Extract the container IP address from Docker inspect
+async fn extract_container_ip(docker: &Docker, container_id: &str) -> String {
+    match docker
+        .inspect_container(
+            container_id,
+            None::<bollard::query_parameters::InspectContainerOptions>,
+        )
+        .await
+    {
+        Ok(info) => {
+            if let Some(network_settings) = info.network_settings {
+                if let Some(networks) = network_settings.networks {
+                    // Try to get IP from the first available network
+                    for (_, endpoint) in networks {
+                        if let Some(ip) = endpoint.ip_address {
+                            if !ip.is_empty() {
+                                return ip;
+                            }
+                        }
+                    }
+                }
+            }
+            tracing::warn!("Could not extract IP for container {}", container_id);
+            String::new()
+        }
+        Err(e) => {
+            tracing::error!("Failed to inspect container {}: {}", container_id, e);
+            String::new()
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use super::*;
+    use bollard::models::ContainerCreateBody;
+    use bollard::query_parameters::{
+        CreateContainerOptions, RemoveContainerOptions, StartContainerOptions,
+    };
+    use futures_util::future::BoxFuture;
+    use std::collections::HashMap;
+
+    async fn cleanup_container(docker: &Docker, name: &str) {
+        let _ = docker
+            .remove_container(
+                name,
+                Some(RemoveContainerOptions {
+                    force: true,
+                    ..Default::default()
+                }),
+            )
+            .await;
+    }
+
+    async fn run_test_container(
+        container_name: &str,
+        args: Vec<&str>,
+    ) -> impl FnOnce() -> BoxFuture<'static, ()> {
+        let docker = Docker::connect_with_local_defaults().unwrap();
+
+        let image = "alpine:latest".to_string();
+        cleanup_container(&docker, container_name).await;
+
+        let mut labels = HashMap::new();
+        labels.insert("bbuilder".to_string(), "true".to_string());
+
+        let config = ContainerCreateBody {
+            image: Some(image),
+            cmd: Some(args.into_iter().map(String::from).collect()),
+            labels: Some(labels),
+            ..Default::default()
+        };
+
+        let create_options = CreateContainerOptions {
+            name: Some(container_name.to_string()),
+            ..Default::default()
+        };
+
+        docker
+            .create_container(Some(create_options), config)
+            .await
+            .unwrap();
+
+        // Start the container
+        docker
+            .start_container(container_name, None::<StartContainerOptions>)
+            .await
+            .unwrap();
+
+        let docker = docker.clone();
+        let container_name = container_name.to_string();
+
+        move || {
+            Box::pin(async move {
+                cleanup_container(&docker, &container_name).await;
+            })
+        }
+    }
+
+    #[tokio::test]
+    async fn test_container_start_and_dies_naturally() {
+        let container_name = "test-container-start-and-dies";
+        let filters = HashMap::from([("container", vec![container_name])]);
+
+        let (event_tx, mut event_rx) = mpsc::unbounded_channel::<DockerEventMessage>();
+        tokio::spawn(listen_docker_events(event_tx, filters));
+
+        let args = vec!["sh", "-c", "sleep 1"];
+        let cleanup = run_test_container(container_name, args).await;
+
+        // Wait long enough for the container to finish
+        tokio::time::sleep(Duration::from_secs(2)).await;
+
+        let start_event = event_rx.recv().await.expect("start event");
+        assert!(matches!(start_event.event, ContainerEvent::Started { .. }));
+
+        let die_event = event_rx.recv().await.expect("die event");
+        assert!(matches!(
+            die_event.event,
+            ContainerEvent::Died { exit_code: 0 }
+        ));
+
+        cleanup().await;
+    }
+
+    #[tokio::test]
+    async fn test_container_forcefully_stops() {
+        let container_name = "test-container-forcefully-stops";
+        let filters = HashMap::from([("container", vec![container_name])]);
+
+        let (event_tx, mut event_rx) = mpsc::unbounded_channel::<DockerEventMessage>();
+        tokio::spawn(listen_docker_events(event_tx, filters));
+
+        let args = vec!["sh", "-c", "sleep 1"];
+        let cleanup = run_test_container(container_name, args).await;
+
+        cleanup().await;
+
+        let start_event = event_rx.recv().await.expect("start event");
+        assert!(matches!(start_event.event, ContainerEvent::Started { .. }));
+
+        let die_event = event_rx.recv().await.expect("die event");
+        assert!(matches!(
+            die_event.event,
+            ContainerEvent::Died { exit_code: 137 }
+        ));
+    }
+
+    #[tokio::test]
+    async fn test_container_error_args() {
+        let container_name = "test-container-error-args";
+        let filters = HashMap::from([("container", vec![container_name])]);
+
+        let (event_tx, mut event_rx) = mpsc::unbounded_channel::<DockerEventMessage>();
+        tokio::spawn(listen_docker_events(event_tx, filters));
+
+        let args = vec!["sh", "-c", "xxxx"];
+        let cleanup = run_test_container(container_name, args).await;
+
+        let start_event = event_rx.recv().await.expect("start event");
+        assert!(matches!(start_event.event, ContainerEvent::Started { .. }));
+
+        let die_event = event_rx.recv().await.expect("die event");
+        assert!(matches!(
+            die_event.event,
+            ContainerEvent::Died { exit_code: 127 }
+        ));
+
+        cleanup().await;
+    }
+}
diff --git a/crates/runtime-docker-compose/src/lib.rs b/crates/runtime-docker-compose/src/lib.rs
index 184d369..008f6a0 100644
--- a/crates/runtime-docker-compose/src/lib.rs
+++ b/crates/runtime-docker-compose/src/lib.rs
@@ -1,3 +1,6 @@
 mod compose;
+mod deployment_watcher;
 mod runtime;
+
+pub use deployment_watcher::{ContainerEvent, ContainerState, ServiceStatus};
 pub use runtime::DockerRuntime;
diff --git a/crates/runtime-docker-compose/src/runtime.rs b/crates/runtime-docker-compose/src/runtime.rs
index aa05d5c..de1a452 100644
--- a/crates/runtime-docker-compose/src/runtime.rs
+++ b/crates/runtime-docker-compose/src/runtime.rs
@@ -1,5 +1,5 @@
 use bollard::Docker;
-use bollard::query_parameters::{CreateImageOptions, EventsOptionsBuilder};
+use bollard::query_parameters::CreateImageOptions;
 use futures_util::future::join_all;
 use futures_util::stream::StreamExt;
 use spec::{File, Manifest};
@@ -7,11 +7,13 @@ use std::collections::{BTreeMap, HashMap, HashSet};
 use std::net::TcpListener;
 use std::process::{Command, Stdio};
 use std::sync::{Arc, Mutex};
+use tokio::sync::mpsc;
 
 use crate::compose::Volume;
 use crate::compose::{
     DependsOn, DependsOnCondition, DockerComposeService, DockerComposeSpec, Port, ServiceVolume,
 };
+use crate::deployment_watcher::{DeploymentState, DockerEventMessage, listen_docker_events};
 
 #[derive(Clone)]
 struct ReservedPorts {
@@ -85,44 +87,45 @@ fn load_reserved_ports(dir_path: &str, reserved_ports: &ReservedPorts) -> eyre::Result<()> {
 pub struct DockerRuntime {
     dir_path: String,
     reserved_ports: ReservedPorts,
+    deployments: Arc<Mutex<HashMap<String, Arc<Mutex<DeploymentState>>>>>,
 }
 
 impl DockerRuntime {
     pub fn new(dir_path: String) -> Self {
+        let reserved_ports = ReservedPorts::new();
+        load_reserved_ports(&dir_path, &reserved_ports).unwrap();
+
+        let deployments: Arc<Mutex<HashMap<String, Arc<Mutex<DeploymentState>>>>> =
+            Arc::new(Mutex::new(HashMap::new()));
+
+        let (event_tx, mut event_rx) = mpsc::unbounded_channel::<DockerEventMessage>();
+
+        let filters = HashMap::from([("label", vec!["bbuilder=true"])]);
+        tokio::spawn(listen_docker_events(event_tx, filters));
+
+        let deployments_clone = Arc::clone(&deployments);
         tokio::spawn(async move {
-            let docker = Docker::connect_with_local_defaults().unwrap();
-
-            // Filter for container events only
-            let filters = HashMap::from([("label", vec!["bbuilder=true"])]);
-            let options = EventsOptionsBuilder::new().filters(&filters).build();
-
-            let mut events = docker.events(Some(options));
-            tracing::debug!("Listening for container events...");
-
-            while let Some(event_result) = events.next().await {
-                match event_result {
-                    Ok(event) => {
-                        tracing::debug!("Event: {:?}", event.action);
-                        if let Some(actor) = event.actor {
-                            tracing::debug!(" Container ID: {:?}", actor.id);
-                            if let Some(attrs) = actor.attributes {
-                                if let Some(name) = attrs.get("name") {
-                                    tracing::debug!(" Container Name: {}", name);
-                                }
+            while let Some(event_msg) = event_rx.recv().await {
+                if let Ok(deps) = deployments_clone.lock() {
+                    for (manifest_name, state) in deps.iter() {
+                        if event_msg.container_name.starts_with(manifest_name) {
+                            if let Ok(mut w) = state.lock() {
+                                w.handle_container_event(
+                                    event_msg.container_name.clone(),
+                                    event_msg.event,
+                                );
                             }
+                            break;
                         }
                     }
-                    Err(e) => tracing::error!("Error: {}", e),
                 }
             }
         });
 
-        let reserved_ports = ReservedPorts::new();
-        load_reserved_ports(&dir_path, &reserved_ports).unwrap();
-
         Self {
             dir_path,
            reserved_ports,
+            deployments,
         }
     }
@@ -393,8 +396,13 @@ impl DockerRuntime {
         let docker_compose_spec = self.convert_to_docker_compose_spec(manifest)?;
 
-        // Pull images before running docker-compose
-        self.pull_images(&docker_compose_spec).await?;
+        // Initialize deployment state
+        let deployment_state = Arc::new(Mutex::new(DeploymentState::new(&docker_compose_spec)));
+
+        // Register deployment in the watcher
+        if let Ok(mut deployments) = self.deployments.lock() {
+            deployments.insert(name.clone(), deployment_state);
+        }
 
         // Write the compose file in the parent folder
         let compose_file_path = parent_folder.join("docker-compose.yaml");
@@ -405,6 +413,9 @@ impl DockerRuntime {
 
         // Run docker-compose up in detached mode
         if !dry_run {
+            // Pull images before running docker-compose
+            self.pull_images(&docker_compose_spec).await?;
+
             Command::new("docker-compose")
                 .arg("-f")
                 .arg(&compose_file_path)
diff --git a/crates/spec/src/lib.rs b/crates/spec/src/lib.rs
index 4eaf003..ce36de6 100644
--- a/crates/spec/src/lib.rs
+++ b/crates/spec/src/lib.rs
@@ -54,7 +54,7 @@ pub struct ChainSpec {
     pub min_version: String,
 }
 
-#[derive(Serialize, Deserialize)]
+#[derive(Clone, Serialize, Deserialize)]
 pub struct Manifest {
     pub name: String,
     pub pods: HashMap,