diff --git a/Cargo.toml b/Cargo.toml index 2c541ec..fef8c7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ exclude = [ [dependencies] clap = { version = "4.5", features = ["derive", "env"] } clap_complete = "4.5" -tokio = { version = "1.49", features = ["rt-multi-thread", "macros", "process", "fs", "io-util", "signal"] } +tokio = { version = "1.49", features = ["rt-multi-thread", "macros", "process", "fs", "io-util", "signal", "time"] } thiserror = "2.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/images/base/mino-bootstrap b/images/base/mino-bootstrap index e55e6ae..1c352f4 100644 --- a/images/base/mino-bootstrap +++ b/images/base/mino-bootstrap @@ -31,7 +31,7 @@ mkdir -p "$STEP_DIR" 2>/dev/null || true if [ -n "$MINO_QUIET_BOOTSTRAP" ]; then INSTALL_REDIRECT="/dev/null" else - INSTALL_REDIRECT="/dev/stderr" + INSTALL_REDIRECT="$(mktemp /tmp/mino-bootstrap-XXXXXX.log)" fi # Log to stderr unless quiet mode is enabled diff --git a/src/cli/commands/cache.rs b/src/cli/commands/cache.rs index 6cd3f20..fc93448 100644 --- a/src/cli/commands/cache.rs +++ b/src/cli/commands/cache.rs @@ -12,7 +12,7 @@ use crate::home::HomeVolume; use crate::orchestration::{create_runtime, ContainerRuntime}; use crate::ui::{self, UiContext}; use chrono::Utc; -use console::style; +use console::{pad_str, style, Alignment}; use std::env; use std::path::PathBuf; use tracing::debug; @@ -100,15 +100,28 @@ async fn list_caches( } fn print_cache_table(caches: &[(CacheVolume, u64)], total_size: u64, limit_bytes: u64) { + const W_VOLUME: usize = 40; + const W_ECO: usize = 10; + const W_STATE: usize = 10; + const W_SIZE: usize = 10; + const W_CREATED: usize = 16; + let ctx = UiContext::detect(); ui::intro(&ctx, "Cache Volumes"); println!( - "{:<40} {:<10} {:<10} {:<10} {:<16}", - "VOLUME", "ECOSYSTEM", "STATE", "SIZE", "CREATED" + "{} {} {} {} {}", + pad_str("VOLUME", W_VOLUME, Alignment::Left, None), + pad_str("ECOSYSTEM", W_ECO, Alignment::Left, 
None), + pad_str("STATE", W_STATE, Alignment::Left, None), + pad_str("SIZE", W_SIZE, Alignment::Left, None), + pad_str("CREATED", W_CREATED, Alignment::Left, None), + ); + println!( + "{}", + "-".repeat(W_VOLUME + 1 + W_ECO + 1 + W_STATE + 1 + W_SIZE + 1 + W_CREATED) ); - println!("{}", "-".repeat(90)); for (cache, size) in caches { let state_display = match cache.state { @@ -126,8 +139,12 @@ fn print_cache_table(caches: &[(CacheVolume, u64)], total_size: u64, limit_bytes let created = cache.created_at.format("%Y-%m-%d %H:%M").to_string(); println!( - "{:<40} {:<10} {:<10} {:<10} {:<16}", - cache.name, cache.ecosystem, state_display, size_display, created + "{} {} {} {} {}", + pad_str(&cache.name, W_VOLUME, Alignment::Left, None), + pad_str(&cache.ecosystem.to_string(), W_ECO, Alignment::Left, None), + pad_str(&state_display, W_STATE, Alignment::Left, None), + pad_str(&size_display, W_SIZE, Alignment::Left, None), + pad_str(&created, W_CREATED, Alignment::Left, None), ); } @@ -162,16 +179,30 @@ fn print_cache_table(caches: &[(CacheVolume, u64)], total_size: u64, limit_bytes } fn print_home_table(home_vols: &[HomeVolume]) { + const W_VOLUME: usize = 40; + const W_PROJECT: usize = 40; + const W_CREATED: usize = 16; + let ctx = UiContext::detect(); ui::intro(&ctx, "Home Volumes"); - println!("{:<40} {:<40} {:<16}", "VOLUME", "PROJECT", "CREATED"); - println!("{}", "-".repeat(96)); + println!( + "{} {} {}", + pad_str("VOLUME", W_VOLUME, Alignment::Left, None), + pad_str("PROJECT", W_PROJECT, Alignment::Left, None), + pad_str("CREATED", W_CREATED, Alignment::Left, None), + ); + println!("{}", "-".repeat(W_VOLUME + 1 + W_PROJECT + 1 + W_CREATED)); for hv in home_vols { let created = hv.created_at.format("%Y-%m-%d %H:%M").to_string(); - println!("{:<40} {:<40} {:<16}", hv.name, hv.project_path, created); + println!( + "{} {} {}", + pad_str(&hv.name, W_VOLUME, Alignment::Left, None), + pad_str(&hv.project_path, W_PROJECT, Alignment::Left, Some("...")), + pad_str(&created, 
W_CREATED, Alignment::Left, None), + ); } println!(); diff --git a/src/cli/commands/list.rs b/src/cli/commands/list.rs index c04e8a5..dd9ddc9 100644 --- a/src/cli/commands/list.rs +++ b/src/cli/commands/list.rs @@ -5,7 +5,7 @@ use crate::config::Config; use crate::error::MinoResult; use crate::session::{Session, SessionManager, SessionStatus}; use crate::ui::{self, UiContext}; -use console::style; +use console::{pad_str, style, Alignment}; /// Execute the list command pub async fn execute(args: ListArgs, _config: &Config) -> MinoResult<()> { @@ -64,24 +64,52 @@ fn format_plain(sessions: &[Session]) -> String { } fn print_table(sessions: &[Session]) { + const W_NAME: usize = 20; + const W_STATUS: usize = 12; + const W_STARTED: usize = 15; + const W_PROJECT: usize = 30; + let ctx = UiContext::detect(); ui::intro(&ctx, "Sessions"); println!( - "{:<20} {:<12} {:<15} {:<30}", - style("NAME").bold(), - style("STATUS").bold(), - style("STARTED").bold(), - style("PROJECT").bold() + "{} {} {} {}", + pad_str( + &style("NAME").bold().to_string(), + W_NAME, + Alignment::Left, + None + ), + pad_str( + &style("STATUS").bold().to_string(), + W_STATUS, + Alignment::Left, + None + ), + pad_str( + &style("STARTED").bold().to_string(), + W_STARTED, + Alignment::Left, + None + ), + pad_str( + &style("PROJECT").bold().to_string(), + W_PROJECT, + Alignment::Left, + None + ), + ); + println!( + "{}", + "-".repeat(W_NAME + 1 + W_STATUS + 1 + W_STARTED + 1 + W_PROJECT) ); - println!("{}", "-".repeat(77)); for session in sessions { let status_styled = match session.status { - SessionStatus::Running => style("running").green(), - SessionStatus::Starting => style("starting").yellow(), - SessionStatus::Stopped => style("stopped").dim(), - SessionStatus::Failed => style("failed").red(), + SessionStatus::Running => style("running").green().to_string(), + SessionStatus::Starting => style("starting").yellow().to_string(), + SessionStatus::Stopped => style("stopped").dim().to_string(), + 
SessionStatus::Failed => style("failed").red().to_string(), }; let started = session.created_at.format("%Y-%m-%d %H:%M").to_string(); @@ -92,8 +120,11 @@ fn print_table(sessions: &[Session]) { .unwrap_or("unknown"); println!( - "{:<20} {:<12} {:<15} {:<30}", - session.name, status_styled, started, project + "{} {} {} {}", + pad_str(&session.name, W_NAME, Alignment::Left, None), + pad_str(&status_styled, W_STATUS, Alignment::Left, None), + pad_str(&started, W_STARTED, Alignment::Left, None), + pad_str(project, W_PROJECT, Alignment::Left, None), ); } diff --git a/src/cli/commands/run/mod.rs b/src/cli/commands/run/mod.rs index 4980be5..da064e8 100644 --- a/src/cli/commands/run/mod.rs +++ b/src/cli/commands/run/mod.rs @@ -18,7 +18,8 @@ use crate::cli::args::RunArgs; use crate::config::Config; use crate::error::{MinoError, MinoResult}; use crate::network::{ - generate_iptables_wrapper, resolve_network_mode, NetworkMode, NetworkResolutionInput, + generate_iptables_wrapper, resolve_network_mode, shell_escape, NetworkMode, + NetworkResolutionInput, }; use crate::orchestration::{create_runtime, ContainerConfig, ContainerRuntime, Platform}; use crate::session::{Session, SessionManager, SessionStatus}; @@ -197,7 +198,7 @@ pub async fn execute(args: RunArgs, config: &Config) -> MinoResult<()> { } // Layers compose on mino-base which has Oh My Zsh configured - let command = if args.command.is_empty() { + let shell_command = if args.command.is_empty() { if using_layers { vec!["/bin/zsh".to_string()] } else { @@ -208,11 +209,13 @@ pub async fn execute(args: RunArgs, config: &Config) -> MinoResult<()> { }; let command = if let NetworkMode::Allow(ref rules) = network_mode { - generate_iptables_wrapper(rules, &command) + generate_iptables_wrapper(rules, &shell_command) } else { - command + shell_command.clone() }; + let is_shell_mode = args.command.is_empty(); + let mut session = Session::new( session_name.clone(), project_dir.clone(), @@ -269,6 +272,9 @@ pub async fn execute(args: 
RunArgs, config: &Config) -> MinoResult<()> { audit: &audit, spinner: &mut spinner, config, + is_shell_mode, + shell_command, + network_mode: &network_mode, }; if args.detach { @@ -289,6 +295,12 @@ struct RunContext<'a> { audit: &'a AuditLog, spinner: &'a mut TaskSpinner, config: &'a Config, + /// True when the user launched a bare shell (no explicit command) + is_shell_mode: bool, + /// The bare shell command for exec phase (e.g. ["/bin/zsh"]) + shell_command: Vec<String>, + /// Resolved network mode (needed by two-phase startup for iptables wrapping) + network_mode: &'a NetworkMode, } impl RunContext<'_> { @@ -391,39 +403,26 @@ async fn run_detached(ctx: &mut RunContext<'_>, cache_session: CacheSession) -> } /// Run container in interactive mode with synchronous cache finalization. +/// +/// Routes to either `run_interactive_shell` (two-phase: sleep + exec) for bare +/// shell mode, or the existing `start_attached` flow for explicit commands. async fn run_interactive(ctx: &mut RunContext<'_>, cache_session: CacheSession) -> MinoResult<()> { - let container_id = match ctx.runtime.create(ctx.container_config, ctx.command).await { - Ok(id) => id, - Err(e) => return ctx.record_failure(e).await, + let exit_code = if ctx.is_shell_mode { + run_interactive_shell(ctx).await? + } else { + run_interactive_command(ctx).await?
}; - ctx.record_start(&container_id).await?; - - ctx.spinner.clear(); - - debug!("Starting container attached: {}", &container_id[..12]); - - let exit_code = ctx.runtime.start_attached(&container_id).await?; - // Finalize caches on clean exit if exit_code == 0 && !cache_session.volumes_to_finalize.is_empty() { finalize_caches(&cache_session).await; } - // Clean up session + // Clean up session state ctx.manager .update_status(ctx.session_name, SessionStatus::Stopped) .await?; - // Remove stopped container to prevent credential persistence in `podman inspect` - if let Err(e) = ctx.runtime.remove(&container_id).await { - warn!( - "Failed to remove container {}: {}", - &container_id[..12.min(container_id.len())], - e - ); - } - ctx.audit .log( "session.stopped", @@ -442,8 +441,9 @@ async fn run_interactive(ctx: &mut RunContext<'_>, cache_session: CacheSession) ); } - // Show update notification on exit (uses cached check, no new HTTP request) - if let Some(update) = crate::version::check_for_update(ctx.config).await { + // Show update notification on exit (reads cached state from disk, picks up + // any background refresh that completed during this session) + if let Some(update) = crate::version::load_cached_update(ctx.config).await { let method = crate::version::detect_install_method(); let hint = crate::version::update_hint(&method); println!( @@ -458,6 +458,139 @@ async fn run_interactive(ctx: &mut RunContext<'_>, cache_session: CacheSession) Ok(()) } +/// Existing flow for explicit commands: create + start_attached. +/// +/// Non-interactive commands like `mino run -- cargo build` need the entrypoint's +/// env setup (nvm, cargo sourcing), so they use `start_attached` which runs the +/// full entrypoint. 
+async fn run_interactive_command(ctx: &mut RunContext<'_>) -> MinoResult { + let container_id = match ctx.runtime.create(ctx.container_config, ctx.command).await { + Ok(id) => id, + Err(e) => return ctx.record_failure(e).await, + }; + + ctx.record_start(&container_id).await?; + ctx.spinner.clear(); + + debug!("Starting container attached: {}", &container_id[..12]); + let exit_code = ctx.runtime.start_attached(&container_id).await?; + + // Remove container (start_attached returns after it exits) + if let Err(e) = ctx.runtime.remove(&container_id).await { + warn!( + "Failed to remove container {}: {}", + &container_id[..12.min(container_id.len())], + e + ); + } + + Ok(exit_code) +} + +/// Two-phase shell startup: create with sleep infinity, bootstrap via spinner, +/// then exec into interactive shell. +/// +/// This avoids dumping hundreds of lines of npm/rustup output into the terminal. +/// Instead, bootstrap output goes to a log file inside the container, and we +/// show a spinner while monitoring `podman logs -f` for the "Bootstrap complete." +/// marker. 
+async fn run_interactive_shell(ctx: &mut RunContext<'_>) -> MinoResult { + // Phase 1: Create container with sleep infinity + let sleep_command = vec!["sleep".to_string(), "infinity".to_string()]; + let phase1_command = if let NetworkMode::Allow(ref rules) = ctx.network_mode { + generate_iptables_wrapper(rules, &sleep_command) + } else { + sleep_command + }; + + let container_id = match ctx + .runtime + .create(ctx.container_config, &phase1_command) + .await + { + Ok(id) => id, + Err(e) => return ctx.record_failure(e).await, + }; + + ctx.record_start(&container_id).await?; + + // Start container detached + if let Err(e) = ctx.runtime.start_detached(&container_id).await { + // Clean up on failure + let _ = ctx.runtime.remove(&container_id).await; + return ctx.record_failure(e).await; + } + + // Monitor bootstrap via logs + ctx.spinner.message("Setting up environment..."); + let bootstrap_timeout = std::time::Duration::from_secs(300); + let found = ctx + .runtime + .logs_follow_until( + &container_id, + "Bootstrap complete.", + bootstrap_timeout, + &|line: String| { + debug!("bootstrap: {}", line); + }, + ) + .await?; + + if !found { + warn!("Bootstrap marker not found within timeout, proceeding anyway"); + } + + ctx.spinner.clear(); + + // Phase 2: Exec interactive shell + // When NetworkMode::Allow is active, the container has CAP_NET_ADMIN for + // iptables setup in phase 1. Drop it before handing control to the user + // shell to prevent `iptables -F` from bypassing the firewall rules. + let exec_command = if matches!(ctx.network_mode, NetworkMode::Allow(_)) { + let escaped_args: String = ctx + .shell_command + .iter() + .map(|arg| format!(" '{}'", shell_escape(arg))) + .collect(); + vec![ + "/bin/sh".to_string(), + "-c".to_string(), + format!( + "if command -v capsh >/dev/null 2>&1; then exec capsh --drop=cap_net_admin -- -c 'exec \"$@\"' --{}; \ + else echo 'mino: capsh not found. Cannot drop CAP_NET_ADMIN -- network allowlist is bypassable without it.' 
>&2; exit 1; fi", + escaped_args + ), + ] + } else { + ctx.shell_command.clone() + }; + debug!( + "Exec into container {}: {:?}", + &container_id[..12], + exec_command + ); + let exit_code = ctx + .runtime + .exec_in_container(&container_id, &exec_command, true) + .await?; + + // Stop the sleep infinity process + if let Err(e) = ctx.runtime.stop(&container_id).await { + warn!("Failed to stop container {}: {}", &container_id[..12], e); + } + + // Remove container + if let Err(e) = ctx.runtime.remove(&container_id).await { + warn!( + "Failed to remove container {}: {}", + &container_id[..12.min(container_id.len())], + e + ); + } + + Ok(exit_code) +} + async fn validate_environment() -> MinoResult<()> { match Platform::detect() { Platform::MacOS => { @@ -825,10 +958,17 @@ mod tests { config: Config, audit: AuditLog, spinner: TaskSpinner, + is_shell_mode: bool, + shell_command: Vec, + network_mode: NetworkMode, } impl SmokeTestFixture { async fn new(prefix: &str) -> Self { + Self::with_shell_mode(prefix, false).await + } + + async fn with_shell_mode(prefix: &str, shell_mode: bool) -> Self { let session_name = format!("{}-{}", prefix, &Uuid::new_v4().to_string()[..8]); let cleanup = SessionCleanup { name: session_name.clone(), @@ -866,6 +1006,9 @@ mod tests { config, audit, spinner, + is_shell_mode: shell_mode, + shell_command: vec!["/bin/zsh".to_string()], + network_mode: NetworkMode::Bridge, } } @@ -879,14 +1022,19 @@ mod tests { audit: &self.audit, spinner: &mut self.spinner, config: &self.config, + is_shell_mode: self.is_shell_mode, + shell_command: self.shell_command.clone(), + network_mode: &self.network_mode, } } } #[tokio::test] #[serial] - async fn smoke_run_interactive() { + async fn smoke_run_interactive_command() { let mut f = SmokeTestFixture::new("test-smoke-int").await; + // is_shell_mode=false (default) → uses start_attached flow + assert!(!f.is_shell_mode); run_interactive(&mut f.run_ctx(), CacheSession::default()) .await @@ -900,6 +1048,30 @@ mod tests 
{ assert_eq!(updated.status, SessionStatus::Stopped); } + #[tokio::test] + #[serial] + async fn smoke_run_interactive_shell() { + let mut f = SmokeTestFixture::with_shell_mode("test-smoke-shell", true).await; + + run_interactive(&mut f.run_ctx(), CacheSession::default()) + .await + .unwrap(); + + // Two-phase: create (with sleep), start_detached, logs_follow_until, exec_in_container, stop, remove + f.mock.assert_called("create", 1); + f.mock.assert_called("start_detached", 1); + f.mock.assert_called("logs_follow_until", 1); + f.mock.assert_called("exec_in_container", 1); + f.mock.assert_called("stop", 1); + f.mock.assert_called("remove", 1); + + // Should not use start_attached (that's the old flow) + f.mock.assert_called("start_attached", 0); + + let updated = f.manager.get(&f.session_name).await.unwrap().unwrap(); + assert_eq!(updated.status, SessionStatus::Stopped); + } + #[tokio::test] #[serial] async fn smoke_run_detached() { @@ -972,4 +1144,113 @@ mod tests { let resolution = resolve_final_image("base", false); assert_eq!(resolution.image, LAYER_BASE_IMAGE); } + + impl SmokeTestFixture { + /// Build a fixture with a pre-configured `MockRuntime`. + /// + /// This lets us queue error responses before wrapping the mock in `Arc`, + /// which is necessary because `MockRuntime::on()` takes `self` by value. 
+ async fn with_mock(prefix: &str, mock: MockRuntime, shell_mode: bool) -> Self { + let session_name = format!("{}-{}", prefix, &Uuid::new_v4().to_string()[..8]); + let cleanup = SessionCleanup { + name: session_name.clone(), + }; + + let manager = SessionManager::new().await.unwrap(); + let session = Session::new( + session_name.clone(), + PathBuf::from("/tmp/test-project"), + vec!["bash".to_string()], + SessionStatus::Starting, + ); + manager.create(&session).await.unwrap(); + + let mock = Arc::new(mock); + let runtime: Arc<dyn ContainerRuntime> = mock.clone(); + + let container_config = test_container_config(); + let command = vec!["bash".to_string()]; + let mut config = Config::default(); + config.general.audit_log = false; + config.general.update_check = false; + let audit = AuditLog::new(&config); + let ctx = UiContext::detect(); + let spinner = TaskSpinner::new(&ctx); + + Self { + mock, + runtime, + manager, + session_name, + _cleanup: cleanup, + container_config, + command, + config, + audit, + spinner, + is_shell_mode: shell_mode, + shell_command: vec!["/bin/zsh".to_string()], + network_mode: NetworkMode::Bridge, + } + } + } + + #[tokio::test] + #[serial] + async fn shell_start_detached_failure_cleans_up_container() { + let mock = MockRuntime::new().on_err( + "start_detached", + MinoError::ContainerStart("engine failure".to_string()), + ); + + let mut f = SmokeTestFixture::with_mock("test-shell-detach-err", mock, true).await; + + let result = run_interactive_shell(&mut f.run_ctx()).await; + + assert!(result.is_err()); + let err_msg = result.unwrap_err().to_string(); + assert!( + err_msg.contains("engine failure"), + "expected 'engine failure' in error, got: {}", + err_msg + ); + + // Should have attempted cleanup: create, start_detached (failed), remove, then record_failure + f.mock.assert_called("create", 1); + f.mock.assert_called("start_detached", 1); + f.mock.assert_called("remove", 1); + + // Session should be marked as Failed + let updated =
f.manager.get(&f.session_name).await.unwrap().unwrap(); + assert_eq!(updated.status, SessionStatus::Failed); + } + + #[tokio::test] + #[serial] + async fn shell_logs_follow_until_error_propagates() { + let mock = MockRuntime::new().on_err( + "logs_follow_until", + MinoError::Internal("log stream broken".to_string()), + ); + + let mut f = SmokeTestFixture::with_mock("test-shell-logs-err", mock, true).await; + + let result = run_interactive_shell(&mut f.run_ctx()).await; + + assert!(result.is_err()); + let err_msg = result.unwrap_err().to_string(); + assert!( + err_msg.contains("log stream broken"), + "expected 'log stream broken' in error, got: {}", + err_msg + ); + + // create and start_detached succeed, logs_follow_until fails + f.mock.assert_called("create", 1); + f.mock.assert_called("start_detached", 1); + f.mock.assert_called("logs_follow_until", 1); + + // Should NOT proceed to exec phase + f.mock.assert_called("exec_in_container", 0); + } +} diff --git a/src/orchestration/mock.rs b/src/orchestration/mock.rs index decc7f0..5a6465a 100644 --- a/src/orchestration/mock.rs +++ b/src/orchestration/mock.rs @@ -371,6 +371,26 @@ impl ContainerRuntime for MockRuntime { self.record("get_container_exit_code", vec![container_id.to_string()]); self.take_optional_int("get_container_exit_code", Some(0)) } + + async fn start_detached(&self, container_id: &str) -> MinoResult<()> { + self.record("start_detached", vec![container_id.to_string()]); + self.take_unit("start_detached") + } + + async fn logs_follow_until( + &self, + container_id: &str, + marker: &str, + _timeout: std::time::Duration, + on_line: &(dyn Fn(String) + Send + Sync), + ) -> MinoResult<bool> { + self.record( + "logs_follow_until", + vec![container_id.to_string(), marker.to_string()], + ); + on_line("Bootstrap complete.".to_string()); + self.take_bool("logs_follow_until", true) + } } /// Create a test session with the given name, status, and optional container ID. 
diff --git a/src/orchestration/mod.rs b/src/orchestration/mod.rs index e59a78d..b57c4ca 100644 --- a/src/orchestration/mod.rs +++ b/src/orchestration/mod.rs @@ -86,6 +86,74 @@ pub(crate) async fn stream_child_output( all_output } +/// Follow a child process's stderr/stdout until a marker line is found or timeout expires. +/// +/// Returns `true` if the marker was found, `false` on timeout. Calls `on_line` +/// for each line received. The child process is killed on timeout or once marker is found. +/// +/// Uses `String` callback (same pattern as `stream_child_output`) to avoid +/// lifetime issues with `async_trait` desugaring. +pub(crate) async fn follow_until_marker( + child: &mut tokio::process::Child, + marker: &str, + timeout: std::time::Duration, + on_line: &(dyn Fn(String) + Send + Sync), +) -> bool { + let stderr = child.stderr.take().expect("stderr piped"); + let stdout = child.stdout.take().expect("stdout piped"); + + let mut stderr_reader = BufReader::new(stderr).lines(); + let mut stdout_reader = BufReader::new(stdout).lines(); + + let mut stderr_done = false; + let mut stdout_done = false; + let mut found = false; + + let sleep = tokio::time::sleep(timeout); + tokio::pin!(sleep); + + while !found && (!stderr_done || !stdout_done) { + tokio::select! 
{ + _ = &mut sleep => { + // Timeout — kill the logs process + let _ = child.kill().await; + let _ = child.wait().await; // reap to avoid zombie + break; + } + result = stderr_reader.next_line(), if !stderr_done => { + match result { + Ok(Some(line)) => { + if line.contains(marker) { + found = true; + } + on_line(line); + } + _ => stderr_done = true, + } + } + result = stdout_reader.next_line(), if !stdout_done => { + match result { + Ok(Some(line)) => { + if line.contains(marker) { + found = true; + } + on_line(line); + } + _ => stdout_done = true, + } + } + } + } + + if found { + // Marker found — kill the tailing process (we don't need more logs) + let _ = child.kill().await; + let _ = child.wait().await; // reap to avoid zombie + } + + found +} + /// Parse `du -sb` output to extract the byte size. /// /// `du -sb` prints `\t` -- this extracts and parses the leading @@ -182,6 +250,7 @@ pub(crate) fn parse_volume_inspect_json( mod tests { use super::*; use crate::error::MinoError; + use std::sync::{Arc, Mutex}; // -- parse_du_bytes -- @@ -475,4 +544,106 @@ mod tests { assert_eq!(result.labels.len(), 1); assert_eq!(result.labels["valid"], "yes"); } + + // -- follow_until_marker -- + + /// Spawn a child process with piped stdout/stderr for marker tests. + fn spawn_marker_test(script: &str) -> tokio::process::Child { + tokio::process::Command::new("sh") + .arg("-c") + .arg(script) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + .expect("failed to spawn") + } + + /// Create a line-collecting callback for marker tests. 
+ fn collecting_callback() -> (impl Fn(String) + Send + Sync, Arc<Mutex<Vec<String>>>) { + let lines = Arc::new(Mutex::new(Vec::<String>::new())); + let lines_clone = lines.clone(); + let on_line = move |line: String| { + lines_clone.lock().unwrap().push(line); + }; + (on_line, lines) + } + + #[tokio::test] + async fn follow_until_marker_found_on_stdout() { + let mut child = + spawn_marker_test("echo 'line one'; echo 'READY_MARKER'; echo 'line three'"); + let (on_line, lines) = collecting_callback(); + + let found = follow_until_marker( + &mut child, + "READY_MARKER", + std::time::Duration::from_secs(5), + &on_line, + ) + .await; + + assert!(found, "marker should have been found"); + let captured = lines.lock().unwrap(); + assert!( + captured.iter().any(|l| l.contains("READY_MARKER")), + "captured lines should include the marker" + ); + } + + #[tokio::test] + async fn follow_until_marker_found_on_stderr() { + let mut child = spawn_marker_test("echo 'stderr READY_MARKER' >&2"); + + let found = follow_until_marker( + &mut child, + "READY_MARKER", + std::time::Duration::from_secs(5), + &|_| {}, + ) + .await; + + assert!(found, "marker should have been found on stderr"); + } + + #[tokio::test] + async fn follow_until_marker_timeout_when_not_found() { + // sleep 60 will never produce the marker, so we expect a timeout + let mut child = tokio::process::Command::new("sleep") + .arg("60") + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + .expect("failed to spawn"); + + let found = follow_until_marker( + &mut child, + "NEVER_APPEARS", + std::time::Duration::from_millis(100), + &|_| {}, + ) + .await; + + assert!(!found, "should return false on timeout"); + } + + #[tokio::test] + async fn follow_until_marker_eof_without_marker() { + let mut child = spawn_marker_test("echo 'no marker here'; echo 'still no marker'"); + let (on_line, lines) = collecting_callback(); + + let found = follow_until_marker( + &mut child, + "MISSING_MARKER", + 
std::time::Duration::from_secs(5), + &on_line, + ) + .await; + + assert!( + !found, + "should return false when EOF reached without marker" + ); + let captured = lines.lock().unwrap(); + assert_eq!(captured.len(), 2, "should have captured both output lines"); + } } diff --git a/src/orchestration/native_podman.rs b/src/orchestration/native_podman.rs index e784d4d..2c4260f 100644 --- a/src/orchestration/native_podman.rs +++ b/src/orchestration/native_podman.rs @@ -540,6 +540,36 @@ impl ContainerRuntime for NativePodmanRuntime { } } } + + async fn start_detached(&self, container_id: &str) -> MinoResult<()> { + debug!("Starting container detached: {}", container_id); + let output = self.exec(&["start", container_id]).await?; + if output.status.success() { + Ok(()) + } else { + let stderr = String::from_utf8_lossy(&output.stderr); + Err(MinoError::ContainerStart(stderr.to_string())) + } + } + + async fn logs_follow_until( + &self, + container_id: &str, + marker: &str, + timeout: std::time::Duration, + on_line: &(dyn Fn(String) + Send + Sync), + ) -> MinoResult { + debug!("Following logs for {} until '{}'", container_id, marker); + + let mut child = Command::new("podman") + .args(["logs", "-f", container_id]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .map_err(|e| MinoError::command_failed("podman logs -f", e))?; + + Ok(super::follow_until_marker(&mut child, marker, timeout, on_line).await) + } } #[cfg(test)] diff --git a/src/orchestration/orbstack_runtime.rs b/src/orchestration/orbstack_runtime.rs index 0e1ab98..6abc431 100644 --- a/src/orchestration/orbstack_runtime.rs +++ b/src/orchestration/orbstack_runtime.rs @@ -607,6 +607,36 @@ impl ContainerRuntime for OrbStackRuntime { } } } + + async fn start_detached(&self, container_id: &str) -> MinoResult<()> { + debug!("Starting container detached: {}", container_id); + let output = self + .orbstack + .exec(&["podman", "start", container_id]) + .await?; + if output.status.success() { + Ok(()) + } else 
{ + let stderr = String::from_utf8_lossy(&output.stderr); + Err(MinoError::ContainerStart(stderr.to_string())) + } + } + + async fn logs_follow_until( + &self, + container_id: &str, + marker: &str, + timeout: std::time::Duration, + on_line: &(dyn Fn(String) + Send + Sync), + ) -> MinoResult { + debug!("Following logs for {} until '{}'", container_id, marker); + + let mut child = self + .orbstack + .spawn_piped(&["podman", "logs", "-f", container_id])?; + + Ok(super::follow_until_marker(&mut child, marker, timeout, on_line).await) + } } #[cfg(test)] diff --git a/src/orchestration/runtime.rs b/src/orchestration/runtime.rs index fd8f17c..5523dd6 100644 --- a/src/orchestration/runtime.rs +++ b/src/orchestration/runtime.rs @@ -125,4 +125,19 @@ pub trait ContainerRuntime: Send + Sync { /// the exit code. Returns `None` if the exit code cannot be determined /// (e.g. the container was already removed). async fn get_container_exit_code(&self, container_id: &str) -> MinoResult>; + + /// Start a created container in detached mode. + async fn start_detached(&self, container_id: &str) -> MinoResult<()>; + + /// Follow container logs until a marker string is found or timeout expires. + /// + /// Calls `on_line` for each log line received. Returns `true` if the marker + /// was found, `false` on timeout. 
+ async fn logs_follow_until( + &self, + container_id: &str, + marker: &str, + timeout: std::time::Duration, + on_line: &(dyn Fn(String) + Send + Sync), + ) -> MinoResult<bool>; +} diff --git a/src/version.rs b/src/version.rs index d26ccde..5c72f7d 100644 --- a/src/version.rs +++ b/src/version.rs @@ -13,7 +13,7 @@ use crate::orchestration::ContainerRuntime; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use std::path::{Path, PathBuf}; -use tracing::warn; +use tracing::{debug, warn}; const STATE_FILENAME: &str = "version_state.json"; const GITHUB_RELEASES_URL: &str = "https://api.github.com/repos/dean0x/mino/releases/latest"; @@ -149,8 +149,17 @@ async fn save_state_to(path: &Path, state: &VersionState) { return; } }; - if let Err(e) = tokio::fs::write(path, json).await { - warn!("Failed to write version state: {}", e); + // Atomic write: write to temp file then rename to avoid partial reads + // from concurrent mino sessions racing on the same state file. + let tmp_path = path.with_extension("tmp"); + if let Err(e) = tokio::fs::write(&tmp_path, json).await { + warn!("Failed to write version state temp file: {}", e); + return; + } + if let Err(e) = tokio::fs::rename(&tmp_path, path).await { + warn!("Failed to rename version state temp file: {}", e); + // Clean up orphaned temp file on rename failure + let _ = tokio::fs::remove_file(&tmp_path).await; + } } @@ -222,54 +231,32 @@ pub async fn check_for_update(config: &Config) -> Option<UpdateInfo> { check_for_update_inner(config, &state_path()).await } -async fn check_for_update_inner(config: &Config, path: &Path) -> Option<UpdateInfo> { - if !config.general.update_check { - return None; - } - - let mut state = load_state_from(path).await; - let current = env!("CARGO_PKG_VERSION"); +/// Load cached update info without triggering a background refresh. +/// +/// Reads the persisted version state and returns `Some(UpdateInfo)` if the +/// cached `latest_available` is newer than the running binary.
Unlike +/// `check_for_update`, this never spawns an HTTP request -- ideal for exit +/// notifications where we just want to surface any result cached earlier. +pub async fn load_cached_update(config: &Config) -> Option { + load_cached_update_inner(config, &state_path()).await +} - if !should_check_update(&state) { - // Use cached result - let latest = state.latest_available.as_deref()?; - if is_newer_version(latest, current) { - return Some(UpdateInfo { - latest: latest.to_string(), - current: current.to_string(), - }); - } +async fn load_cached_update_inner(config: &Config, path: &Path) -> Option { + if !config.general.update_check { return None; } - // Perform HTTP check - let body = match tokio::task::spawn_blocking(fetch_latest_release).await { - Ok(Ok(body)) => body, - Ok(Err(e)) => { - warn!("Update check failed: {}", e); - return None; - } - Err(e) => { - warn!("Update check task failed: {}", e); - return None; - } - }; - - let latest = match parse_github_release(&body) { - Some(v) => v, - None => { - warn!("Failed to parse GitHub release response"); - return None; - } - }; - - state.last_update_check = Some(Utc::now()); - state.latest_available = Some(latest.clone()); - save_state_to(path, &state).await; + let state = load_state_from(path).await; + cached_update_from_state(&state) +} - if is_newer_version(&latest, current) { +/// Build an `UpdateInfo` from cached state if a newer version is available. 
+fn cached_update_from_state(state: &VersionState) -> Option { + let current = env!("CARGO_PKG_VERSION"); + let latest = state.latest_available.as_deref()?; + if is_newer_version(latest, current) { Some(UpdateInfo { - latest, + latest: latest.to_string(), current: current.to_string(), }) } else { @@ -277,6 +264,46 @@ async fn check_for_update_inner(config: &Config, path: &Path) -> Option Option { + if !config.general.update_check { + return None; + } + + let state = load_state_from(path).await; + + // Background refresh if cache is stale (fire-and-forget) + if should_check_update(&state) { + let path = path.to_path_buf(); + tokio::spawn(async move { + let body = match tokio::task::spawn_blocking(fetch_latest_release).await { + Ok(Ok(body)) => body, + Ok(Err(e)) => { + debug!("Background update check failed: {}", e); + return; + } + Err(e) => { + debug!("Background update check task panicked: {}", e); + return; + } + }; + match parse_github_release(&body) { + Some(latest) => { + let mut state = load_state_from(&path).await; + state.last_update_check = Some(Utc::now()); + state.latest_available = Some(latest); + save_state_to(&path, &state).await; + } + None => { + debug!("Background update check: failed to parse release response"); + } + } + }); + } + + // Always use cached result (instant, zero latency) + cached_update_from_state(&state) +} + fn fetch_latest_release() -> Result { use std::time::Duration; use ureq::Agent; @@ -695,6 +722,41 @@ mod tests { assert!(result.is_none()); } + #[tokio::test] + async fn update_check_first_call_no_cache_returns_none() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("state.json"); + + // No state file at all — first run + let config = Config::default(); + let result = check_for_update_inner(&config, &path).await; + // Returns None because there's no cached latest_available yet + // (background task would populate it for next session) + assert!(result.is_none()); + } + + #[tokio::test] + async fn 
update_check_stale_cache_returns_cached_result() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("state.json"); + + // Stale cache (>24h) but has a cached newer version + let state = VersionState { + installed_version: Some(env!("CARGO_PKG_VERSION").to_string()), + last_update_check: Some(Utc::now() - chrono::Duration::hours(25)), + latest_available: Some("99.0.0".to_string()), + }; + save_state_to(&path, &state).await; + + let config = Config::default(); + let result = check_for_update_inner(&config, &path).await; + // Returns cached result immediately even though cache is stale + // (background task refreshes for next time) + assert!(result.is_some()); + let info = result.unwrap(); + assert_eq!(info.latest, "99.0.0"); + } + // --- clear_composed_images tests --- #[tokio::test]