diff --git a/.gitignore b/.gitignore index 9f97022..23bd54c 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ -target/ \ No newline at end of file +target/ +.bcvk/ diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 0000000..142a070 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +docs/HACKING.md \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 4705295..2492b52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,6 +165,7 @@ dependencies = [ "comfy-table", "const_format", "data-encoding", + "dialoguer", "dirs", "fn-error-context", "indicatif", @@ -189,6 +190,8 @@ dependencies = [ "tempfile", "thiserror 1.0.69", "tokio", + "toml", + "toml_edit", "tracing", "tracing-error", "tracing-subscriber", @@ -644,6 +647,20 @@ dependencies = [ "syn", ] +[[package]] +name = "dialoguer" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" +dependencies = [ + "console", + "fuzzy-matcher", + "shell-words", + "tempfile", + "thiserror 1.0.69", + "zeroize", +] + [[package]] name = "digest" version = "0.10.7" @@ -883,6 +900,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fuzzy-matcher" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54614a3312934d066701a80f20f15fa3b56d67ac7722b39eea5b4c9dd1d66c94" +dependencies = [ + "thread_local", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -2265,6 +2291,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shell-words" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + [[package]] name = "shlex" version = "1.3.0" diff --git a/crates/integration-tests/src/main.rs b/crates/integration-tests/src/main.rs index 1d27d4d..1012b8a 100644 --- a/crates/integration-tests/src/main.rs +++ b/crates/integration-tests/src/main.rs @@ 
-21,6 +21,7 @@ mod tests { pub mod libvirt_upload_disk; pub mod libvirt_verb; pub mod mount_feature; + pub mod project; pub mod run_ephemeral; pub mod run_ephemeral_ssh; pub mod to_disk; diff --git a/crates/integration-tests/src/tests/project.rs b/crates/integration-tests/src/tests/project.rs new file mode 100644 index 0000000..fafa858 --- /dev/null +++ b/crates/integration-tests/src/tests/project.rs @@ -0,0 +1,213 @@ +//! Integration tests for bcvk project commands +//! +//! ⚠️ **CRITICAL INTEGRATION TEST POLICY** ⚠️ +//! +//! INTEGRATION TESTS MUST NEVER "warn and continue" ON FAILURES! +//! +//! If something is not working: +//! - Use `todo!("reason why this doesn't work yet")` +//! - Use `panic!("clear error message")` +//! - Use `assert!()` and `unwrap()` to fail hard +//! +//! NEVER use patterns like: +//! - "Note: test failed - likely due to..." +//! - "This is acceptable in CI/testing environments" +//! - Warning and continuing on failures + +use camino::Utf8PathBuf; +use color_eyre::Result; +use linkme::distributed_slice; +use std::process::Command; +use tempfile::TempDir; + +use crate::{get_bck_command, IntegrationTest, INTEGRATION_TESTS}; + +#[distributed_slice(INTEGRATION_TESTS)] +static TEST_PROJECT_WORKFLOW: IntegrationTest = + IntegrationTest::new("project_upgrade_workflow", test_project_upgrade_workflow); + +/// Test the full project workflow including upgrade +/// +/// This test: +/// 1. Creates a custom bootc image based on centos-bootc:stream10 +/// 2. Initializes a bcvk project +/// 3. Starts the VM with the initial image +/// 4. Modifies the Containerfile and builds v2 +/// 5. Triggers manual upgrade with `bcvk project ssh -A` +/// 6. 
Verifies the upgrade was applied in the VM +fn test_project_upgrade_workflow() -> Result<()> { + let temp_dir = TempDir::new().expect("Failed to create temp directory"); + let project_dir = + Utf8PathBuf::from_path_buf(temp_dir.path().to_path_buf()).expect("temp path is not UTF-8"); + + // Create initial Containerfile + let containerfile_path = project_dir.join("Containerfile"); + let initial_containerfile = r#"FROM quay.io/centos-bootc/centos-bootc:stream10 + +# Add a marker file for version 1 +RUN echo "version1" > /usr/share/test-version +"#; + std::fs::write(&containerfile_path, initial_containerfile) + .expect("Failed to write initial Containerfile"); + + // Build initial image + let image_name = "localhost/bcvk-test-project:latest"; + println!("Building initial test image: {}", image_name); + let build_output = Command::new("podman") + .args(&["build", "-t", image_name, "-f"]) + .arg(containerfile_path.as_str()) + .arg(project_dir.as_str()) + .output() + .expect("Failed to run podman build"); + + assert!( + build_output.status.success(), + "Initial podman build failed: {}", + String::from_utf8_lossy(&build_output.stderr) + ); + + // Create .bcvk directory and config.toml + let bcvk_dir = project_dir.join(".bcvk"); + std::fs::create_dir(&bcvk_dir).expect("Failed to create .bcvk directory"); + + let config_content = format!( + r#"[vm] +image = "{}" +memory = "2G" +cpus = 2 +disk-size = "10G" +"#, + image_name + ); + std::fs::write(bcvk_dir.join("config.toml"), config_content) + .expect("Failed to write config.toml"); + + let bcvk = get_bck_command()?; + + // Start the project VM (detached) + println!("Starting project VM..."); + let up_output = Command::new(&bcvk) + .args(&["project", "up"]) + .current_dir(&project_dir) + .env("BCVK_PROJECT_DIR", project_dir.as_str()) + .output() + .expect("Failed to run bcvk project up"); + + if !up_output.status.success() { + eprintln!("bcvk project up failed:"); + eprintln!("stdout: {}", 
String::from_utf8_lossy(&up_output.stdout)); + eprintln!("stderr: {}", String::from_utf8_lossy(&up_output.stderr)); + panic!("Failed to start project VM"); + } + + // Give VM time to boot + std::thread::sleep(std::time::Duration::from_secs(30)); + + // Verify version 1 is in the VM + println!("Verifying initial version..."); + let check_v1_output = Command::new(&bcvk) + .args(&["project", "ssh", "cat", "/usr/share/test-version"]) + .current_dir(&project_dir) + .output() + .expect("Failed to check initial version"); + + let v1_content = String::from_utf8_lossy(&check_v1_output.stdout); + assert!( + v1_content.contains("version1"), + "Initial version marker not found in VM. Output: {}", + v1_content + ); + + // Update Containerfile to version 2 + println!("Building updated image (v2)..."); + let updated_containerfile = r#"FROM quay.io/centos-bootc/centos-bootc:stream10 + +# Add a marker file for version 2 +RUN echo "version2" > /usr/share/test-version +"#; + std::fs::write(&containerfile_path, updated_containerfile) + .expect("Failed to write updated Containerfile"); + + // Build version 2 + let build_v2_output = Command::new("podman") + .args(&["build", "-t", image_name, "-f"]) + .arg(containerfile_path.as_str()) + .arg(project_dir.as_str()) + .output() + .expect("Failed to run podman build for v2"); + + assert!( + build_v2_output.status.success(), + "Version 2 podman build failed: {}", + String::from_utf8_lossy(&build_v2_output.stderr) + ); + + // Trigger upgrade with `bcvk project ssh -A` + println!("Triggering upgrade with `bcvk project ssh -A`..."); + let upgrade_output = Command::new(&bcvk) + .args(&["project", "ssh", "-A", "echo", "upgrade-complete"]) + .current_dir(&project_dir) + .output() + .expect("Failed to run bcvk project ssh -A"); + + if !upgrade_output.status.success() { + eprintln!("bcvk project ssh -A failed:"); + eprintln!( + "stdout: {}", + String::from_utf8_lossy(&upgrade_output.stdout) + ); + eprintln!( + "stderr: {}", + 
String::from_utf8_lossy(&upgrade_output.stderr) + ); + panic!("Failed to trigger upgrade"); + } + + let upgrade_stdout = String::from_utf8_lossy(&upgrade_output.stdout); + assert!( + upgrade_stdout.contains("upgrade-complete"), + "Upgrade command did not complete successfully" + ); + + // Check bootc status to verify new deployment is staged + println!("Checking bootc status for staged deployment..."); + let status_output = Command::new(&bcvk) + .args(&["project", "ssh", "bootc", "status", "--json"]) + .current_dir(&project_dir) + .output() + .expect("Failed to run bootc status"); + + let status_json = String::from_utf8_lossy(&status_output.stdout); + println!("bootc status output: {}", status_json); + + // Verify that status shows a staged deployment or that we have the new image + // The exact behavior depends on bootc version, but we should see some indication + // of the upgrade + assert!( + status_output.status.success(), + "bootc status failed: {}", + String::from_utf8_lossy(&status_output.stderr) + ); + + // Clean up - destroy the VM + println!("Cleaning up project VM..."); + let _down_output = Command::new(&bcvk) + .args(&["project", "down"]) + .current_dir(&project_dir) + .output() + .expect("Failed to run bcvk project down"); + + let _rm_output = Command::new(&bcvk) + .args(&["project", "rm"]) + .current_dir(&project_dir) + .output() + .expect("Failed to run bcvk project rm"); + + // Clean up the test image + let _rmi_output = Command::new("podman") + .args(&["rmi", "-f", image_name]) + .output() + .ok(); + + Ok(()) +} diff --git a/crates/kit/Cargo.toml b/crates/kit/Cargo.toml index 969427d..0417491 100644 --- a/crates/kit/Cargo.toml +++ b/crates/kit/Cargo.toml @@ -14,6 +14,7 @@ color-eyre = { workspace = true } clap = { version = "4.4", features = ["derive"] } clap_mangen = { version = "0.2.20", optional = true } data-encoding = { version = "2.9" } +dialoguer = { version = "0.11", features = ["fuzzy-select"] } dirs = "5.0" fn-error-context = { version = 
"0.2" } bootc-mount = { git = "https://github.com/bootc-dev/bootc", rev = "93b22f4dbc2d54f7cca7c1df3ee59fcdec0b2cf1" } @@ -21,7 +22,7 @@ bootc-utils = { git = "https://github.com/bootc-dev/bootc", rev = "93b22f4dbc2d5 indicatif = "0.17" notify = "6.1" thiserror = "1.0" -rustix = { "version" = "1", features = ["thread", "net", "fs", "pipe", "system", "process", "mount"] } +rustix = { "version" = "1", features = ["thread", "net", "fs", "pipe", "system", "process", "mount", "event"] } serde = { version = "1.0.199", features = ["derive"] } serde_json = "1.0.116" serde_yaml = "0.9" @@ -32,6 +33,8 @@ tracing-error = { workspace = true } shlex = "1" reqwest = { version = "0.12", features = ["blocking"] } tempfile = "3" +toml = "0.8" +toml_edit = "0.22" uuid = { version = "1.10", features = ["v4"] } xshell = { workspace = true } yaml-rust2 = "0.9" diff --git a/crates/kit/src/cache_metadata.rs b/crates/kit/src/cache_metadata.rs index db76070..79225c4 100644 --- a/crates/kit/src/cache_metadata.rs +++ b/crates/kit/src/cache_metadata.rs @@ -12,8 +12,6 @@ use crate::install_options::InstallOptions; use cap_std_ext::cap_std::{self, fs::Dir}; use cap_std_ext::dirext::CapStdExtDirExt; use color_eyre::{eyre::Context, Result}; -use serde::{Deserialize, Serialize}; -use sha2::{Digest, Sha256}; use std::ffi::OsStr; use std::fs::File; use std::path::Path; @@ -24,72 +22,27 @@ const BOOTC_CACHE_HASH_XATTR: &str = "user.bootc.cache_hash"; /// Extended attribute name for storing container image digest const BOOTC_IMAGE_DIGEST_XATTR: &str = "user.bootc.image_digest"; -/// Build inputs used to generate a cache hash -#[derive(Debug, Clone, Serialize, Deserialize)] -struct CacheInputs { - /// SHA256 digest of the source container image - image_digest: String, - - /// Filesystem type used for installation (e.g., "ext4", "xfs", "btrfs") - filesystem: Option, - - /// Root filesystem size if specified - root_size: Option, - - /// Whether to use composefs-native storage - composefs_native: bool, - 
- /// Kernel arguments used during installation - kernel_args: Vec, - - /// Version of the cache format for future compatibility - version: u32, -} - /// Metadata stored on disk images for caching purposes #[derive(Debug, Clone)] pub struct DiskImageMetadata { /// SHA256 digest of the source container image pub digest: String, - - /// Filesystem type used for installation (e.g., "ext4", "xfs", "btrfs") - pub filesystem: Option, - - /// Root filesystem size if specified - pub root_size: Option, - - /// Whether to use composefs-native storage - pub composefs_native: bool, - - /// Kernel arguments used during installation - pub kernel_args: Vec, - - /// Version of the metadata format for future compatibility - pub version: u32, } impl DiskImageMetadata { /// Generate SHA256 hash of all build inputs - pub fn compute_cache_hash(&self) -> String { - let inputs = CacheInputs { - image_digest: self.digest.clone(), - filesystem: self.filesystem.clone(), - root_size: self.root_size.clone(), - composefs_native: self.composefs_native, - kernel_args: self.kernel_args.clone(), - version: self.version, - }; - - let json = serde_json::to_string(&inputs).expect("Failed to serialize cache inputs"); - let mut hasher = Sha256::new(); - hasher.update(json.as_bytes()); - format!("sha256:{:x}", hasher.finalize()) + /// + /// Delegates to InstallOptions::compute_hash() to avoid duplication. + /// This ensures the hash includes all fields that affect the generated disk, + /// including target_transport. 
+ pub fn compute_cache_hash(&self, install_options: &InstallOptions) -> String { + install_options.compute_hash(&self.digest) } /// Write metadata to a file using extended attributes via rustix - pub fn write_to_file(&self, file: &File) -> Result<()> { + pub fn write_to_file(&self, file: &File, install_options: &InstallOptions) -> Result<()> { // Write the cache hash - let cache_hash = self.compute_cache_hash(); + let cache_hash = self.compute_cache_hash(install_options); rustix::fs::fsetxattr( file, BOOTC_CACHE_HASH_XATTR, @@ -152,15 +105,10 @@ impl DiskImageMetadata { } impl DiskImageMetadata { - /// Create new metadata from InstallOptions and image digest - pub fn from(options: &InstallOptions, image: &str) -> Self { + /// Create new metadata from image digest + pub fn from(_options: &InstallOptions, image: &str) -> Self { Self { - version: 1, digest: image.to_owned(), - filesystem: options.filesystem.clone(), - root_size: options.root_size.clone(), - kernel_args: options.karg.clone(), - composefs_native: options.composefs_native, } } } @@ -186,9 +134,8 @@ pub fn check_cached_disk( return Ok(Err(ValidationError::MissingFile)); } - // Create metadata for the current request to compute expected hash - let expected_meta = DiskImageMetadata::from(install_options, image_digest); - let expected_hash = expected_meta.compute_cache_hash(); + // Compute expected hash directly from install options + let expected_hash = install_options.compute_hash(image_digest); // Read the cache hash from the disk image let parent = path @@ -242,32 +189,29 @@ mod tests { root_size: Some("20G".to_string()), ..Default::default() }; - let metadata1 = DiskImageMetadata::from(&install_options1, "sha256:abc123"); let install_options2 = InstallOptions { filesystem: Some("ext4".to_string()), root_size: Some("20G".to_string()), ..Default::default() }; - let metadata2 = DiskImageMetadata::from(&install_options2, "sha256:abc123"); // Same inputs should generate same hash assert_eq!( - 
metadata1.compute_cache_hash(), - metadata2.compute_cache_hash() + install_options1.compute_hash("sha256:abc123"), + install_options2.compute_hash("sha256:abc123") ); - // Different inputs should generate different hashes + // Different image digest should generate different hash let install_options3 = InstallOptions { filesystem: Some("ext4".to_string()), root_size: Some("20G".to_string()), ..Default::default() }; - let metadata3 = DiskImageMetadata::from(&install_options3, "sha256:xyz789"); assert_ne!( - metadata1.compute_cache_hash(), - metadata3.compute_cache_hash() + install_options1.compute_hash("sha256:abc123"), + install_options3.compute_hash("sha256:xyz789") ); // Different filesystem should generate different hash @@ -276,33 +220,23 @@ mod tests { root_size: Some("20G".to_string()), ..Default::default() }; - let metadata4 = DiskImageMetadata::from(&install_options4, "sha256:abc123"); assert_ne!( - metadata1.compute_cache_hash(), - metadata4.compute_cache_hash() + install_options1.compute_hash("sha256:abc123"), + install_options4.compute_hash("sha256:abc123") ); - } - #[test] - fn test_cache_inputs_serialization() -> Result<()> { - let inputs = CacheInputs { - image_digest: "sha256:abc123".to_string(), + // Different target_transport should generate different hash + let mut install_options5 = InstallOptions { filesystem: Some("ext4".to_string()), root_size: Some("20G".to_string()), - kernel_args: vec!["console=ttyS0".to_string()], - composefs_native: false, - version: 1, + ..Default::default() }; + install_options5.target_transport = Some("containers-storage".to_string()); - let json = serde_json::to_string(&inputs)?; - let deserialized: CacheInputs = serde_json::from_str(&json)?; - - assert_eq!(inputs.image_digest, deserialized.image_digest); - assert_eq!(inputs.filesystem, deserialized.filesystem); - assert_eq!(inputs.root_size, deserialized.root_size); - assert_eq!(inputs.kernel_args, deserialized.kernel_args); - assert_eq!(inputs.version, 
deserialized.version); - Ok(()) + assert_ne!( + install_options1.compute_hash("sha256:abc123"), + install_options5.compute_hash("sha256:abc123") + ); } } diff --git a/crates/kit/src/common_opts.rs b/crates/kit/src/common_opts.rs index e90ab58..e11ff56 100644 --- a/crates/kit/src/common_opts.rs +++ b/crates/kit/src/common_opts.rs @@ -5,6 +5,9 @@ use serde::{Deserialize, Serialize}; use std::fmt; pub const DEFAULT_MEMORY_USER_STR: &str = "4G"; +pub const DEFAULT_CPUS: u32 = 2; +pub const DEFAULT_DISK_SIZE: &str = "20G"; +pub const DEFAULT_NETWORK: &str = "user"; /// Memory size options #[derive(Parser, Debug, Clone, Default, Serialize, Deserialize)] @@ -14,11 +17,78 @@ pub struct MemoryOpts { default_value = DEFAULT_MEMORY_USER_STR, help = "Memory size (e.g. 4G, 2048M, or plain number for MB)" )] + #[serde(default = "default_memory")] pub memory: String, } +fn default_memory() -> String { + DEFAULT_MEMORY_USER_STR.to_string() +} + impl fmt::Display for MemoryOpts { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.memory) } } + +/// CPU count options +#[derive(Parser, Debug, Clone, Serialize, Deserialize)] +pub struct CpuOpts { + #[clap(long, default_value_t = DEFAULT_CPUS, help = "Number of virtual CPUs")] + #[serde(default = "default_cpus")] + pub cpus: u32, +} + +impl Default for CpuOpts { + fn default() -> Self { + Self { cpus: DEFAULT_CPUS } + } +} + +fn default_cpus() -> u32 { + DEFAULT_CPUS +} + +/// Disk size options +#[derive(Parser, Debug, Clone, Serialize, Deserialize)] +pub struct DiskSizeOpts { + #[clap( + long, + default_value = DEFAULT_DISK_SIZE, + help = "Disk size (e.g. 
20G, 10240M, or plain number for bytes)" + )] + #[serde(default = "default_disk_size", rename = "disk-size")] + pub disk_size: String, +} + +impl Default for DiskSizeOpts { + fn default() -> Self { + Self { + disk_size: DEFAULT_DISK_SIZE.to_string(), + } + } +} + +fn default_disk_size() -> String { + DEFAULT_DISK_SIZE.to_string() +} + +/// Network mode options +#[derive(Parser, Debug, Clone, Serialize, Deserialize)] +pub struct NetworkOpts { + #[clap(long, default_value = DEFAULT_NETWORK, help = "Network mode for the VM")] + #[serde(default = "default_network")] + pub network: String, +} + +impl Default for NetworkOpts { + fn default() -> Self { + Self { + network: DEFAULT_NETWORK.to_string(), + } + } +} + +fn default_network() -> String { + DEFAULT_NETWORK.to_string() +} diff --git a/crates/kit/src/domain_list.rs b/crates/kit/src/domain_list.rs index 691aed6..c0cd618 100644 --- a/crates/kit/src/domain_list.rs +++ b/crates/kit/src/domain_list.rs @@ -5,6 +5,7 @@ use crate::xml_utils; use base64::Engine; +use camino::Utf8PathBuf; use color_eyre::{eyre::Context, Result}; use serde::{Deserialize, Serialize}; use std::process::Command; @@ -35,6 +36,8 @@ pub struct PodmanBootcDomain { pub has_ssh_key: bool, /// SSH private key (available only when outputting JSON) pub ssh_private_key: Option, + /// Project directory path (for project-scoped VMs) + pub project_dir: Option, } impl PodmanBootcDomain { @@ -219,6 +222,11 @@ impl DomainLister { let ssh_private_key = extract_ssh_private_key(dom); let has_ssh_key = ssh_private_key.is_some(); + // Extract project directory + let project_dir = dom + .find_with_namespace("project-dir") + .map(|node| Utf8PathBuf::from(node.text_content())); + Ok(Some(PodmanBootcDomainMetadata { source_image, created, @@ -229,6 +237,7 @@ impl DomainLister { ssh_port, has_ssh_key, ssh_private_key, + project_dir, })) } @@ -264,6 +273,7 @@ impl DomainLister { ssh_port: metadata.as_ref().and_then(|m| m.ssh_port), has_ssh_key: metadata.as_ref().map(|m| 
m.has_ssh_key).unwrap_or(false), ssh_private_key: metadata.as_ref().and_then(|m| m.ssh_private_key.clone()), + project_dir: metadata.as_ref().and_then(|m| m.project_dir.clone()), }) } @@ -329,6 +339,7 @@ struct PodmanBootcDomainMetadata { ssh_port: Option, has_ssh_key: bool, ssh_private_key: Option, + project_dir: Option, } /// Extract disk path from domain XML using DOM parser @@ -451,6 +462,7 @@ mod tests { ssh_port: None, has_ssh_key: false, ssh_private_key: None, + project_dir: None, }; assert!(domain.is_running()); @@ -469,6 +481,7 @@ mod tests { ssh_port: None, has_ssh_key: false, ssh_private_key: None, + project_dir: None, }; assert!(!stopped_domain.is_running()); diff --git a/crates/kit/src/install_options.rs b/crates/kit/src/install_options.rs index 961d83e..70d5468 100644 --- a/crates/kit/src/install_options.rs +++ b/crates/kit/src/install_options.rs @@ -6,43 +6,99 @@ use camino::Utf8PathBuf; use clap::Parser; +use serde::Serialize; +use sha2::{Digest, Sha256}; /// Common installation options for bootc disk operations /// /// These options control filesystem configuration and storage paths /// for bootc installation commands. Use `#[clap(flatten)]` to include /// these in command-specific option structures. -#[derive(Debug, Default, Parser, Clone)] +#[derive(Debug, Default, Parser, Clone, Serialize)] pub struct InstallOptions { /// Root filesystem type (overrides bootc image default) #[clap(long, help = "Root filesystem type (e.g. 
ext4, xfs, btrfs)")] + #[serde(skip_serializing_if = "Option::is_none")] pub filesystem: Option, /// Custom root filesystem size (e.g., '10G', '5120M') #[clap(long, help = "Root filesystem size (e.g., '10G', '5120M')")] + #[serde(skip_serializing_if = "Option::is_none")] pub root_size: Option, /// Path to host container storage (auto-detected if not specified) + /// NOTE: This does NOT affect the generated disk content, only where to find the source image #[clap( long, help = "Path to host container storage (auto-detected if not specified)" )] + #[serde(skip)] pub storage_path: Option, #[clap(long)] /// Set a kernel argument + #[serde(skip_serializing_if = "Vec::is_empty")] pub karg: Vec, /// Default to composefs-native storage #[clap(long)] pub composefs_native: bool, + + /// Target transport for image pulling (e.g., "containers-storage") + /// Not exposed via CLI - set programmatically when needed + #[clap(skip)] + #[serde(skip_serializing_if = "Option::is_none")] + pub target_transport: Option, +} + +/// Internal structure for computing cache hash +/// Wraps InstallOptions with additional metadata +#[derive(Serialize)] +struct CacheHashInputs<'a> { + /// SHA256 digest of the source container image + image_digest: &'a str, + + /// All install options that affect the generated disk + /// (storage_path is excluded via #[serde(skip)]) + #[serde(flatten)] + options: &'a InstallOptions, + + /// Version of the cache format for future compatibility + version: u32, } impl InstallOptions { + /// Compute SHA256 hash of all inputs that affect the generated disk + /// + /// This hash is used for cache validation. Any new field added to InstallOptions + /// will automatically affect the hash (unless marked with #[serde(skip)] or + /// #[serde(skip_serializing_if)]). 
+ /// + /// Fields excluded from hash: + /// - storage_path: Only affects where to find the source image, not disk content + /// - Option fields when None: Skipped to maintain hash stability + pub fn compute_hash(&self, image_digest: &str) -> String { + let inputs = CacheHashInputs { + image_digest, + options: self, + version: 1, + }; + + let json = serde_json::to_string(&inputs).expect("Failed to serialize cache inputs"); + let mut hasher = Sha256::new(); + hasher.update(json.as_bytes()); + format!("sha256:{:x}", hasher.finalize()) + } + /// Get the bootc install command arguments for these options pub fn to_bootc_args(&self) -> Vec { let mut args = vec![]; + if let Some(ref target_transport) = self.target_transport { + args.push("--target-transport".to_string()); + args.push(target_transport.clone()); + } + if let Some(ref filesystem) = self.filesystem { args.push("--filesystem".to_string()); args.push(filesystem.clone()); diff --git a/crates/kit/src/libvirt/base_disks.rs b/crates/kit/src/libvirt/base_disks.rs index 3a6d88c..6921308 100644 --- a/crates/kit/src/libvirt/base_disks.rs +++ b/crates/kit/src/libvirt/base_disks.rs @@ -20,7 +20,7 @@ pub fn find_or_create_base_disk( connect_uri: Option<&str>, ) -> Result { let metadata = DiskImageMetadata::from(install_options, image_digest); - let cache_hash = metadata.compute_cache_hash(); + let cache_hash = metadata.compute_cache_hash(install_options); // Extract short hash for filename (first 16 chars after "sha256:") let short_hash = cache_hash diff --git a/crates/kit/src/libvirt/lifecycle_monitor.rs b/crates/kit/src/libvirt/lifecycle_monitor.rs new file mode 100644 index 0000000..407fe2d --- /dev/null +++ b/crates/kit/src/libvirt/lifecycle_monitor.rs @@ -0,0 +1,305 @@ +//! Process lifecycle monitor for parent process binding +//! +//! This module implements background monitoring of a parent process and executing a command +//! when the parent exits. 
Used by the `--lifecycle-bind-parent` flag to automatically shut +//! down VMs when the parent `bcvk` process exits. +//! +//! # Architecture +//! +//! ## pidfd-Based Monitoring with Fallback +//! +//! The monitor uses modern Linux kernel features for efficient parent process monitoring: +//! +//! 1. **pidfd + poll()** - Opens a pidfd for the parent process and blocks on `poll()` waiting +//! for it to become readable (process exit) +//! 2. **Fallback to /proc polling** - If `pidfd_open()` fails (ENOSYS/EPERM), falls back to +//! polling `/proc//` every 1 second +//! 3. **Signal Handlers** - Tokio signal handlers for SIGTERM and SIGINT +//! 4. **Event Loop** - `tokio::select!` waits for any of these events +//! +//! ## Key Design Decisions +//! +//! **pidfd over set_parent_process_death_signal:** +//! - `set_parent_process_death_signal` doesn't work when parent exits immediately +//! - pidfd allows monitoring an arbitrary PID, not just direct parent +//! - Efficient: `poll()` blocks until process exits (no busy polling) +//! +//! **Graceful Fallback:** +//! - pidfd requires Linux kernel 5.3+ (2019) +//! - Falls back to `/proc` polling if unsupported +//! - Handles ENOSYS (kernel too old) and EPERM (permission denied) +//! +//! **Generic Command Execution:** +//! - Accepts arbitrary command to run when parent exits +//! - Makes it testable with simple commands like `echo "test"` +//! - Generalizable for any cleanup action, not just VM shutdown +//! +//! # Usage +//! +//! ## Automatic (Project VMs) +//! +//! ```bash +//! bcvk project up # Lifecycle binding enabled by default +//! ``` +//! +//! ## Explicit +//! +//! ```bash +//! bcvk libvirt run --lifecycle-bind-parent --name my-vm quay.io/fedora/fedora-bootc:42 +//! ``` +//! +//! ## Direct (Testing/Debugging) +//! +//! ```bash +//! # Monitor a specific PID and run command when it exits +//! bcvk internals lifecycle-monitor 12345 virsh shutdown my-vm +//! +//! # Use "parent" to monitor the actual parent process +//! 
bcvk internals lifecycle-monitor parent echo "Parent exited" +//! +//! # With libvirt connection URI +//! bcvk internals lifecycle-monitor 12345 virsh -c qemu:///session shutdown my-vm +//! ``` +//! +//! # Testing +//! +//! ```bash +//! # Test with a temporary process +//! sleep 10 & +//! TEST_PID=$! +//! bcvk internals lifecycle-monitor $TEST_PID echo "Sleep process exited" +//! # The monitor will print the message when sleep exits after 10 seconds +//! ``` + +use clap::Parser; +use color_eyre::{eyre::Context, Result}; +use rustix::event::{poll, PollFd, PollFlags}; +use rustix::fd::OwnedFd; +use rustix::process::{getppid, pidfd_open, PidfdFlags}; +use std::path::Path; +use std::process::Command; +use std::time::Duration; +use tokio::signal::unix::{signal, SignalKind}; +use tracing::debug; + +/// Internal command to monitor parent process and execute command on exit +#[derive(Debug, Parser)] +pub struct LifecycleMonitorOpts { + /// Parent process ID to monitor (numeric PID or "parent" for actual parent process) + pub parent_pid: String, + + /// Command and arguments to run when parent exits + #[clap(trailing_var_arg = true, required = true)] + pub command: Vec, +} + +/// Resolve the parent PID string to a numeric PID +fn resolve_parent_pid(pid_str: &str) -> Result { + if pid_str == "parent" { + // Get the actual parent process ID using rustix + let ppid = + getppid().ok_or_else(|| color_eyre::eyre::eyre!("Failed to get parent process ID"))?; + Ok(ppid.as_raw_nonzero().get() as u32) + } else { + // Parse as numeric PID + pid_str.parse::().with_context(|| { + format!( + "Invalid PID: '{}' (expected numeric PID or 'parent')", + pid_str + ) + }) + } +} + +/// Execute the lifecycle monitor +#[allow(unsafe_code)] +pub async fn run_async(opts: LifecycleMonitorOpts) -> Result<()> { + let parent_pid = resolve_parent_pid(&opts.parent_pid)?; + let command = opts.command.clone(); + + debug!( + "Starting lifecycle monitor for parent PID {} (command: {:?})", + parent_pid, 
command + ); + + // Try to open pidfd for the parent process, fall back to polling if unsupported + let wait_method = match open_pidfd(parent_pid) { + Ok(pidfd) => { + debug!("Using pidfd for parent process monitoring"); + WaitMethod::Pidfd(pidfd) + } + Err(e) => { + debug!("pidfd_open failed ({}), falling back to /proc polling", e); + WaitMethod::ProcPolling + } + }; + + // Set up signal handlers for SIGTERM and SIGINT + let mut sigterm = + signal(SignalKind::terminate()).context("Failed to create SIGTERM handler")?; + let mut sigint = signal(SignalKind::interrupt()).context("Failed to create SIGINT handler")?; + + debug!( + "Monitoring parent process {} and waiting for shutdown signals (SIGTERM, SIGINT)", + parent_pid + ); + + // Spawn blocking task to wait for parent exit + let wait_task = + tokio::task::spawn_blocking(move || wait_for_parent_exit(wait_method, parent_pid)); + + // Wait for either parent exit or signals + tokio::select! { + result = wait_task => { + match result { + Ok(Ok(())) => debug!("Parent process {} exited", parent_pid), + Ok(Err(e)) => debug!("Error waiting for parent process: {}", e), + Err(e) => debug!("Wait task panicked: {}", e), + } + } + _ = sigterm.recv() => { + debug!("SIGTERM received"); + } + _ = sigint.recv() => { + debug!("SIGINT received"); + } + } + + debug!( + "Shutdown trigger received, executing command: {:?}", + command + ); + + // Execute the command + if let Err(e) = execute_command(&command) { + debug!("Failed to execute command: {}", e); + std::process::exit(1); + } + + // Exit the process immediately without waiting for tokio runtime shutdown. + // This is important because the spawned blocking task may still be running + // (blocking on poll), and rt.block_on() won't return until all tasks complete. + // Using process::exit() bypasses the runtime shutdown and exits immediately. 
+ std::process::exit(0); +} + +/// Method for waiting on parent process exit +enum WaitMethod { + /// Use pidfd (modern, efficient) + Pidfd(OwnedFd), + /// Use /proc polling (fallback) + ProcPolling, +} + +/// Try to open pidfd for the parent process, handling errors for unsupported systems +fn open_pidfd(pid: u32) -> Result { + let pid_raw = rustix::process::Pid::from_raw(pid as i32) + .ok_or_else(|| color_eyre::eyre::eyre!("Invalid PID: {}", pid))?; + + match pidfd_open(pid_raw, PidfdFlags::empty()) { + Ok(fd) => Ok(fd), + Err(rustix::io::Errno::NOSYS) => { + Err(color_eyre::eyre::eyre!("pidfd_open not supported (ENOSYS)")) + } + Err(rustix::io::Errno::PERM) => Err(color_eyre::eyre::eyre!( + "pidfd_open permission denied (EPERM)" + )), + Err(e) => Err(color_eyre::eyre::eyre!("pidfd_open failed: {}", e)), + } +} + +/// Wait for parent process to exit using the specified method +fn wait_for_parent_exit(method: WaitMethod, pid: u32) -> Result<()> { + match method { + WaitMethod::Pidfd(pidfd) => wait_for_pidfd(pidfd), + WaitMethod::ProcPolling => wait_for_proc_exit(pid), + } +} + +/// Wait for pidfd to become readable (parent process exit) using poll() +fn wait_for_pidfd(pidfd: OwnedFd) -> Result<()> { + let mut poll_fds = [PollFd::new(&pidfd, PollFlags::IN)]; + + // Block until pidfd is readable (process exits) or error + // Pass None for infinite timeout + loop { + match poll(&mut poll_fds, None) { + Ok(_) => { + // Check if POLLIN is set (process exited) + let revents = poll_fds[0].revents(); + if revents.contains(PollFlags::IN) { + debug!("Pidfd became readable - parent process exited"); + return Ok(()); + } + // If other event, continue polling + debug!("Poll returned with revents: {:?}", revents); + } + Err(rustix::io::Errno::INTR) => { + // Interrupted by signal, continue + debug!("Poll interrupted by signal, continuing"); + continue; + } + Err(e) => { + return Err(color_eyre::eyre::eyre!("poll() failed: {}", e)); + } + } + } +} + +/// Wait for parent process 
to exit by polling /proc (fallback) +fn wait_for_proc_exit(pid: u32) -> Result<()> { + let proc_path = format!("/proc/{}", pid); + + loop { + if !Path::new(&proc_path).exists() { + debug!("Process {} no longer exists in /proc", pid); + return Ok(()); + } + + std::thread::sleep(Duration::from_secs(1)); + } +} + +/// Synchronous wrapper for async run +pub fn run(opts: LifecycleMonitorOpts) -> Result<()> { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .context("Failed to create tokio runtime")?; + + rt.block_on(run_async(opts)) +} + +/// Execute the configured command without waiting for completion +fn execute_command(command: &[String]) -> Result<()> { + if command.is_empty() { + return Err(color_eyre::eyre::eyre!("No command specified")); + } + + debug!("Executing command: {:?}", command); + + let mut cmd = Command::new(&command[0]); + if command.len() > 1 { + cmd.args(&command[1..]); + } + + let output = cmd + .output() + .with_context(|| format!("Failed to execute command: {:?}", command))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + debug!( + "Command {:?} failed with status {}: {}", + command, output.status, stderr + ); + } else { + let stdout = String::from_utf8_lossy(&output.stdout); + if !stdout.is_empty() { + debug!("Command output: {}", stdout); + } + debug!("Command executed successfully: {:?}", command); + } + + Ok(()) +} diff --git a/crates/kit/src/libvirt/mod.rs b/crates/kit/src/libvirt/mod.rs index df89e49..eabdb2c 100644 --- a/crates/kit/src/libvirt/mod.rs +++ b/crates/kit/src/libvirt/mod.rs @@ -23,10 +23,14 @@ pub const LIBVIRT_DEFAULT_MEMORY: &str = "4G"; /// Default disk size for libvirt base disks pub const LIBVIRT_DEFAULT_DISK_SIZE: &str = "20G"; +/// Default filesystem for libvirt VMs +pub const LIBVIRT_DEFAULT_FILESYSTEM: &str = "ext4"; + pub mod base_disks; pub mod base_disks_cli; pub mod domain; pub mod inspect; +pub mod lifecycle_monitor; pub mod list; 
pub mod list_volumes; pub mod rm; diff --git a/crates/kit/src/libvirt/run.rs b/crates/kit/src/libvirt/run.rs index 28edb4f..51fbffe 100644 --- a/crates/kit/src/libvirt/run.rs +++ b/crates/kit/src/libvirt/run.rs @@ -241,6 +241,10 @@ pub struct LibvirtRunOpts { #[clap(long)] pub transient: bool, + /// Bind VM lifecycle to parent process (shutdown VM when parent exits) + #[clap(long)] + pub lifecycle_bind_parent: bool, + /// Additional metadata key-value pairs (used internally, not exposed via CLI) #[clap(skip)] pub metadata: std::collections::HashMap, @@ -334,6 +338,13 @@ pub fn run(global_opts: &crate::libvirt::LibvirtOptions, opts: LibvirtRunOpts) - // VM is now managed by libvirt, no need to track separately + // Spawn lifecycle monitor if requested + if opts.lifecycle_bind_parent { + spawn_lifecycle_monitor(&vm_name, connect_uri) + .with_context(|| "Failed to spawn lifecycle monitor")?; + println!("Lifecycle monitor started for domain '{}'", vm_name); + } + println!("VM '{}' created successfully!", vm_name); println!(" Image: {}", opts.image); println!(" Disk: {}", disk_path); @@ -403,6 +414,64 @@ pub fn run(global_opts: &crate::libvirt::LibvirtOptions, opts: LibvirtRunOpts) - } } +/// Spawn a background lifecycle monitor process for the VM +pub(crate) fn spawn_lifecycle_monitor(domain_name: &str, connect_uri: Option<&str>) -> Result<()> { + use std::process::{Command, Stdio}; + + // Get the current executable path for spawning the monitor + let current_exe = + std::env::current_exe().with_context(|| "Failed to get current executable path")?; + + // Get the parent process PID (the shell) to monitor + let parent_pid = rustix::process::getppid() + .ok_or_else(|| color_eyre::eyre::eyre!("Failed to get parent process ID"))?; + let parent_pid_num = parent_pid.as_raw_nonzero().get() as u32; + + debug!( + "Spawning lifecycle monitor for domain '{}' (parent PID: {})", + domain_name, parent_pid_num + ); + + // Build the virsh shutdown command + let mut virsh_args = 
vec!["virsh".to_string()]; + if let Some(uri) = connect_uri { + virsh_args.push("-c".to_string()); + virsh_args.push(uri.to_string()); + } + virsh_args.push("shutdown".to_string()); + virsh_args.push(domain_name.to_string()); + + // Build the command to spawn the monitor: + // internals lifecycle-monitor <parent-pid> virsh [-c <uri>] shutdown <domain-name> + let mut cmd = Command::new(&current_exe); + cmd.arg("internals") + .arg("lifecycle-monitor") + .arg(parent_pid_num.to_string()) + .args(&virsh_args); + + // Detach the process: redirect stdio to /dev/null and spawn in background + cmd.stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()); + + // Spawn the process + let child = cmd.spawn().with_context(|| { + format!( + "Failed to spawn lifecycle monitor process for domain '{}'", + domain_name + ) + })?; + + debug!( + "Lifecycle monitor spawned with PID {} for domain '{}' (command: {:?})", + child.id(), + domain_name, + virsh_args + ); + + Ok(()) +} + /// Determine the appropriate default storage pool path based on connection type fn get_default_pool_path(connect_uri: &str) -> Utf8PathBuf { if connect_uri.contains("/session") { @@ -749,30 +818,6 @@ fn process_bind_mounts( Ok(domain_builder) } -/// Check if the libvirt version supports readonly virtiofs filesystems -/// Requires libvirt 11.0+ and modern QEMU with rust-based virtiofsd -fn check_libvirt_readonly_support() -> Result<()> { - let version = crate::libvirt::status::parse_libvirt_version() - .with_context(|| "Failed to check libvirt version")?; - - if crate::libvirt::status::supports_readonly_virtiofs(&version) { - Ok(()) - } else { - match version { - Some(v) => Err(color_eyre::eyre::eyre!( - "The --bind-storage-ro flag requires libvirt 11.0 or later for readonly virtiofs support. \ - Current version: {}", - v.full_version - )), - None => Err(color_eyre::eyre::eyre!( - "Could not parse libvirt version. \ - The --bind-storage-ro flag requires libvirt 11.0+ with rust-based virtiofsd support.
\ - Please ensure you have a compatible libvirt version installed." - )) - } - } -} - #[cfg(test)] mod tests { use super::*; @@ -958,7 +1003,7 @@ fn create_libvirt_domain_from_disk( opts.install .filesystem .as_ref() - .unwrap_or(&"ext4".to_string()), + .unwrap_or(&crate::libvirt::LIBVIRT_DEFAULT_FILESYSTEM.to_string()), ) .with_metadata("bootc:network", &opts.network) .with_metadata("bootc:ssh-generated", "true") @@ -1017,6 +1062,16 @@ fn create_libvirt_domain_from_disk( let mut mount_unit_smbios_creds = Vec::new(); let mut mount_unit_names = Vec::new(); + // Check if libvirt supports readonly virtiofs + let version = crate::libvirt::status::parse_libvirt_version() + .with_context(|| "Failed to check libvirt version")?; + let supports_readonly = crate::libvirt::status::supports_readonly_virtiofs(&version); + + // Log once if we're falling back readonly mounts to read-write + if !supports_readonly && (!opts.bind_mounts_ro.is_empty() || opts.bind_storage_ro) { + info!("Libvirt version does not support readonly virtiofs; using read-write bind mounts"); + } + // Process bind mounts (read-write and read-only) domain_builder = process_bind_mounts( &opts.bind_mounts, @@ -1027,20 +1082,30 @@ fn create_libvirt_domain_from_disk( &mut mount_unit_names, )?; - domain_builder = process_bind_mounts( - &opts.bind_mounts_ro, - "bcvk-bind-ro-", - true, - domain_builder, - &mut mount_unit_smbios_creds, - &mut mount_unit_names, - )?; + // Process readonly bind mounts - fall back to read-write if not supported + if supports_readonly { + domain_builder = process_bind_mounts( + &opts.bind_mounts_ro, + "bcvk-bind-ro-", + true, + domain_builder, + &mut mount_unit_smbios_creds, + &mut mount_unit_names, + )?; + } else { + // Fall back to read-write mounts + domain_builder = process_bind_mounts( + &opts.bind_mounts_ro, + "bcvk-bind-", + false, + domain_builder, + &mut mount_unit_smbios_creds, + &mut mount_unit_names, + )?; + } // Add container storage mount if requested if 
opts.bind_storage_ro { - // Check libvirt version compatibility for readonly virtiofs - check_libvirt_readonly_support().context("libvirt version compatibility check failed")?; - let storage_path = crate::utils::detect_container_storage_path() .context("Failed to detect container storage path.")?; crate::utils::validate_container_storage_path(&storage_path) @@ -1051,10 +1116,11 @@ fn create_libvirt_domain_from_disk( storage_path ); + // Use readonly if supported, otherwise fall back to read-write let virtiofs_fs = VirtiofsFilesystem { source_dir: storage_path.to_string(), tag: "hoststorage".to_string(), - readonly: true, + readonly: supports_readonly, }; domain_builder = domain_builder diff --git a/crates/kit/src/libvirt/status.rs b/crates/kit/src/libvirt/status.rs index c09bdbe..ba21236 100644 --- a/crates/kit/src/libvirt/status.rs +++ b/crates/kit/src/libvirt/status.rs @@ -7,6 +7,7 @@ use clap::Parser; use color_eyre::{eyre::Context, Result}; use serde::{Deserialize, Serialize}; use std::process::Command; +use std::sync::OnceLock; use crate::domain_list::DomainLister; @@ -28,7 +29,7 @@ pub enum OutputFormat { } /// libvirt version information -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct LibvirtVersion { pub major: u32, pub minor: u32, @@ -97,7 +98,7 @@ fn parse_libvirt_version_from_output(version_output: &str) -> Option Result> { +fn parse_libvirt_version_uncached() -> Result> { let output = Command::new("virsh") .args(&["version"]) .output() @@ -113,6 +114,22 @@ pub fn parse_libvirt_version() -> Result> { Ok(parse_libvirt_version_from_output(&version_output)) } +/// Cached libvirt version (parsed once per process) +static LIBVIRT_VERSION: OnceLock> = OnceLock::new(); + +/// Get the cached libvirt version, parsing it on first call +pub fn parse_libvirt_version() -> Result> { + // If already cached, clone and return + if let Some(version) = LIBVIRT_VERSION.get() { + return Ok(version.clone()); + } + + // 
Parse version and cache it + let version = parse_libvirt_version_uncached()?; + let _ = LIBVIRT_VERSION.set(version.clone()); + Ok(version) +} + /// Check if libvirt supports readonly virtiofs pub fn supports_readonly_virtiofs(version: &Option) -> bool { match version { diff --git a/crates/kit/src/main.rs b/crates/kit/src/main.rs index 815f092..695b0b2 100644 --- a/crates/kit/src/main.rs +++ b/crates/kit/src/main.rs @@ -23,6 +23,7 @@ mod libvirt; mod libvirt_upload_disk; #[allow(dead_code)] mod podman; +mod project; #[allow(dead_code)] mod qemu; mod qemu_img; @@ -95,6 +96,10 @@ enum InternalsCmds { /// Dump CLI structure as JSON for man page generation #[cfg(feature = "docgen")] DumpCliJson, + + /// Monitor parent process and shutdown VM when parent exits (internal use only) + #[clap(hide = true)] + LifecycleMonitor(libvirt::lifecycle_monitor::LifecycleMonitorOpts), } /// Available bcvk commands for container and VM management. @@ -109,7 +114,7 @@ enum Commands { Images(images::ImagesOpts), /// Manage ephemeral VMs for bootc containers - #[clap(subcommand)] + #[clap(subcommand, alias = "e")] Ephemeral(ephemeral::EphemeralCommands), /// Install bootc images to persistent disk images @@ -117,6 +122,7 @@ enum Commands { ToDisk(to_disk::ToDiskOpts), /// Manage libvirt integration for bootc containers + #[clap(alias = "v")] Libvirt { /// Hypervisor connection URI (e.g., qemu:///system, qemu+ssh://host/system) #[clap(short = 'c', long = "connect", global = true)] @@ -126,6 +132,10 @@ enum Commands { command: libvirt::LibvirtSubcommands, }, + /// Project-scoped VM management (Vagrant-like workflow) + #[clap(subcommand, alias = "p")] + Project(project::ProjectSubcommands), + /// Upload bootc disk images to libvirt (deprecated) #[clap(name = "libvirt-upload-disk", hide = true)] LibvirtUploadDisk(libvirt_upload_disk::LibvirtUploadDiskOpts), @@ -219,6 +229,14 @@ fn main() -> Result<(), Report> { } } } + Commands::Project(cmd) => match cmd { + 
project::ProjectSubcommands::Init(opts) => project::init::run(opts)?, + project::ProjectSubcommands::Up(opts) => project::up::run(opts)?, + project::ProjectSubcommands::Ssh(opts) => project::ssh::run(opts)?, + project::ProjectSubcommands::Down(opts) => project::down::run(opts)?, + project::ProjectSubcommands::Rm(opts) => project::rm::run(opts)?, + project::ProjectSubcommands::Ls(opts) => project::ls::run(opts)?, + }, Commands::LibvirtUploadDisk(opts) => { eprintln!( "Warning: 'libvirt-upload-disk' is deprecated. Use 'libvirt upload' instead." @@ -252,6 +270,9 @@ fn main() -> Result<(), Report> { let json = cli_json::dump_cli_json()?; println!("{}", json); } + InternalsCmds::LifecycleMonitor(opts) => { + libvirt::lifecycle_monitor::run(opts)?; + } }, } tracing::debug!("exiting"); diff --git a/crates/kit/src/project/config.rs b/crates/kit/src/project/config.rs new file mode 100644 index 0000000..07d3e7e --- /dev/null +++ b/crates/kit/src/project/config.rs @@ -0,0 +1,210 @@ +//! Project configuration file parsing for `.bcvk/config.toml` + +use camino::{Utf8Path, Utf8PathBuf}; +use color_eyre::{eyre::Context as _, Result}; +use serde::{Deserialize, Serialize}; +use std::fs; + +/// Configuration file name within project directory +pub const CONFIG_DIR: &str = ".bcvk"; +pub const CONFIG_FILE: &str = "config.toml"; + +/// Project configuration loaded from `.bcvk/config.toml` +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct ProjectConfig { + /// Project metadata + pub project: Option<ProjectInfo>, + + /// VM configuration + pub vm: Option<VmConfig>, + + /// Volume mounts + pub mounts: Option<Vec<MountConfig>>, + + /// Systemd configuration + pub systemd: Option<SystemdConfig>, +} + +/// Project metadata section +#[derive(Debug, Clone, Default, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct ProjectInfo { + /// Optional project name override (defaults to directory name) + pub name: Option<String>, +} + +/// VM configuration section +#[derive(Debug, Clone,
Default, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct VmConfig { + /// Container image to run as bootable VM + #[serde(default)] + pub image: String, + + /// Memory allocation (e.g., "4G", "2048M") + #[serde(flatten, default)] + pub memory: crate::common_opts::MemoryOpts, + + /// Number of virtual CPUs + #[serde(flatten, default)] + pub cpu: crate::common_opts::CpuOpts, + + /// Disk size (e.g., "20G", "50G") + #[serde(flatten, default)] + pub disk: crate::common_opts::DiskSizeOpts, + + /// Network mode + #[serde(flatten, default)] + pub net: crate::common_opts::NetworkOpts, + + /// Root filesystem type + pub filesystem: Option<String>, +} + +/// Mount configuration +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct MountConfig { + /// Host path to mount + pub host: String, + + /// Guest mount point or tag + pub guest: String, + + /// Mount as read-only + #[serde(default)] + pub readonly: bool, +} + +/// Systemd configuration section +#[derive(Debug, Clone, Default, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct SystemdConfig { + /// Directory containing systemd units (relative to project root) + pub units_dir: Option<String>, +} + +impl ProjectConfig { + /// Load project configuration from directory + /// + /// Looks for `.bcvk/config.toml` in the given directory. + /// Returns `None` if the config file doesn't exist.
+ pub fn load_from_dir(dir: &Utf8Path) -> Result<Option<Self>> { + let config_path = dir.join(CONFIG_DIR).join(CONFIG_FILE); + + if !config_path.exists() { + return Ok(None); + } + + let content = fs::read_to_string(&config_path) + .with_context(|| format!("Failed to read config file: {}", config_path))?; + + let config: ProjectConfig = toml::from_str(&content) + .with_context(|| format!("Failed to parse config file: {}", config_path))?; + + // Validate that vm section and image are specified + if let Some(ref vm) = config.vm { + if vm.image.is_empty() { + color_eyre::eyre::bail!( + "vm.image must be specified in config file: {}", + config_path + ); + } + } else { + color_eyre::eyre::bail!( + "vm section must be specified in config file: {}", + config_path + ); + } + + Ok(Some(config)) + } + + /// Get the systemd units directory path + /// + /// Returns the absolute path to the systemd units directory if configured. + pub fn systemd_units_dir(&self, project_dir: &Utf8Path) -> Option<Utf8PathBuf> { + self.systemd + .as_ref() + .and_then(|systemd| systemd.units_dir.as_ref()) + .map(|dir| project_dir.join(dir)) + } + + /// Get the .bcvk/units directory path if it exists + /// + /// This is the default location for systemd units.
+ pub fn default_units_dir(project_dir: &Utf8Path) -> Option { + let units_dir = project_dir.join(CONFIG_DIR).join("units"); + if units_dir.exists() { + Some(units_dir) + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_minimal_config() { + let toml = r#" +[vm] +image = "localhost/my-image" +"#; + + let config: ProjectConfig = toml::from_str(toml).unwrap(); + let vm = config.vm.as_ref().unwrap(); + assert_eq!(vm.image, "localhost/my-image"); + assert_eq!(vm.memory.memory, "4G"); + assert_eq!(vm.cpu.cpus, 2); + assert_eq!(vm.disk.disk_size, "20G"); + } + + #[test] + fn test_parse_full_config() { + let toml = r#" +[project] +name = "my-project" + +[vm] +image = "localhost/my-image" +memory = "8G" +cpus = 4 +disk-size = "50G" +network = "bridge=br0" +filesystem = "xfs" + +[[mounts]] +host = "/data" +guest = "/mnt/data" +readonly = true + +[[mounts]] +host = "/tmp/workspace" +guest = "/workspace" +readonly = false + +[systemd] +units-dir = "systemd-units" +"#; + + let config: ProjectConfig = toml::from_str(toml).unwrap(); + let project = config.project.as_ref().unwrap(); + assert_eq!(project.name, Some("my-project".to_string())); + let vm = config.vm.as_ref().unwrap(); + assert_eq!(vm.image, "localhost/my-image"); + assert_eq!(vm.memory.memory, "8G"); + assert_eq!(vm.cpu.cpus, 4); + assert_eq!(vm.disk.disk_size, "50G"); + assert_eq!(vm.net.network, "bridge=br0"); + assert_eq!(vm.filesystem, Some("xfs".to_string())); + assert_eq!(config.mounts.as_ref().unwrap().len(), 2); + assert_eq!(config.mounts.as_ref().unwrap()[0].host, "/data"); + assert_eq!(config.mounts.as_ref().unwrap()[0].readonly, true); + let systemd = config.systemd.as_ref().unwrap(); + assert_eq!(systemd.units_dir, Some("systemd-units".to_string())); + } +} diff --git a/crates/kit/src/project/down.rs b/crates/kit/src/project/down.rs new file mode 100644 index 0000000..e799a00 --- /dev/null +++ b/crates/kit/src/project/down.rs @@ -0,0 +1,86 @@ +//! 
Implementation of `bcvk project down` command + +use clap::Parser; +use color_eyre::Result; + +use crate::libvirt::{self, LibvirtOptions}; + +use super::{current_project_dir, project_vm_name}; + +/// Shut down the project VM +/// +/// Gracefully shuts down the VM but does not remove it. +#[derive(Debug, Parser)] +pub struct ProjectDownOpts { + /// Libvirt connection URI (defaults to qemu:///session) + #[clap(long)] + pub connect: Option, + + /// Remove the VM after shutting it down + #[clap(long)] + pub remove: bool, + + #[clap(long)] + pub force: bool, +} + +/// Run the project down command +pub fn run(opts: ProjectDownOpts) -> Result<()> { + // Get current project directory + let project_dir = current_project_dir()?; + + // Load project configuration (optional for down, just for name generation) + let config = crate::project::config::ProjectConfig::load_from_dir(&project_dir)?; + + // Generate project VM name + let vm_name = project_vm_name(&project_dir, config.as_ref())?; + + // Check if VM exists + let libvirt_opts = LibvirtOptions { + connect: opts.connect.clone(), + }; + + if !check_vm_exists(&vm_name, &libvirt_opts)? { + println!("Project is already down. vm_name: '{}'", vm_name); + return Ok(()); + } + + // Stop the VM + println!("Shutting down project VM '{}'...", vm_name); + + let stop_opts = libvirt::stop::LibvirtStopOpts { + name: vm_name.to_string(), + force: opts.force, + timeout: 60, + }; + + let _ = libvirt::stop::run(&libvirt_opts, stop_opts); + + // Remove if requested + if opts.remove { + println!("Removing project VM '{}'...", vm_name); + let rm_opts = libvirt::rm::LibvirtRmOpts { + name: vm_name.to_string(), + force: opts.force, + stop: false, + }; + + libvirt::rm::run(&libvirt_opts, rm_opts)? 
+ } + + Ok(()) +} + +/// Check if a VM exists +fn check_vm_exists(name: &str, libvirt_opts: &LibvirtOptions) -> Result<bool> { + use crate::domain_list::DomainLister; + + let lister = if let Some(ref uri) = libvirt_opts.connect { + DomainLister::with_connection(uri.clone()) + } else { + DomainLister::new() + }; + let domains = lister.list_bootc_domains()?; + + Ok(domains.iter().any(|d| d.name == name)) +} diff --git a/crates/kit/src/project/init.rs b/crates/kit/src/project/init.rs new file mode 100644 index 0000000..b3abe4d --- /dev/null +++ b/crates/kit/src/project/init.rs @@ -0,0 +1,123 @@ +//! Implementation of `bcvk project init` command + +use camino::Utf8Path; +use clap::Parser; +use color_eyre::{eyre::Context as _, Result}; +use dialoguer::{theme::ColorfulTheme, Confirm, FuzzySelect, Input}; +use std::fs; +use toml_edit::{value, DocumentMut, Item, Table}; + +use super::{config::CONFIG_DIR, config::CONFIG_FILE, current_project_dir}; + +/// Initialize project configuration interactively +#[derive(Debug, Parser)] +pub struct ProjectInitOpts { + /// Overwrite existing configuration if it exists + #[clap(long, short = 'f')] + pub force: bool, +} + +/// Run the project init command +pub fn run(opts: ProjectInitOpts) -> Result<()> { + let project_dir = current_project_dir()?; + let config_path = project_dir.join(CONFIG_DIR).join(CONFIG_FILE); + + // Check if config already exists + if config_path.exists() && !opts.force { + color_eyre::eyre::bail!( + "Configuration file already exists: {}\n\ + Use --force to overwrite it.", + config_path + ); + } + + generate_config_interactive(&project_dir)?; + println!("\n✓ Configuration saved to .bcvk/config.toml"); + println!("Run 'bcvk project up' to start your VM"); + + Ok(()) +} + +/// Generate project configuration interactively +fn generate_config_interactive(project_dir: &Utf8Path) -> Result<()> { + println!("bcvk project configuration wizard\n"); + + // Get list of bootc images for autocomplete + let images =
crate::images::list().unwrap_or_default(); + let image_names: Vec = images + .iter() + .filter_map(|img| { + img.names.as_ref().and_then(|names| { + names.first().map(|name| { + // Remove :latest suffix for cleaner display + if name.ends_with(":latest") { + name.strip_suffix(":latest").unwrap_or(name).to_string() + } else { + name.clone() + } + }) + }) + }) + .collect(); + + // Prompt for container image + let image = if !image_names.is_empty() { + println!("Select a bootc container image (type to filter):"); + let selection = FuzzySelect::with_theme(&ColorfulTheme::default()) + .items(&image_names) + .default(0) + .interact() + .context("Failed to select image")?; + image_names[selection].clone() + } else { + println!("No bootc images found locally."); + Input::::with_theme(&ColorfulTheme::default()) + .with_prompt("Enter container image") + .interact_text() + .context("Failed to get image input")? + }; + + // Ask about custom project name + let custom_name = if Confirm::with_theme(&ColorfulTheme::default()) + .with_prompt("Do you want to set a custom project name?") + .default(false) + .interact() + .context("Failed to get confirmation")? 
+ { + Some( + Input::::with_theme(&ColorfulTheme::default()) + .with_prompt("Project name") + .interact_text() + .context("Failed to get project name")?, + ) + } else { + None + }; + + // Create .bcvk directory + let config_dir = project_dir.join(CONFIG_DIR); + fs::create_dir_all(&config_dir) + .with_context(|| format!("Failed to create directory: {}", config_dir))?; + + // Build TOML document using toml_edit + let mut doc = DocumentMut::new(); + + // Add project name if set + if let Some(name) = custom_name { + let mut project_table = Table::new(); + project_table.insert("name", value(name)); + doc.insert("project", Item::Table(project_table)); + } + + // Add VM section with only the image + let mut vm_table = Table::new(); + vm_table.insert("image", value(image)); + doc.insert("vm", Item::Table(vm_table)); + + // Write configuration file + let config_path = config_dir.join(CONFIG_FILE); + fs::write(&config_path, doc.to_string()) + .with_context(|| format!("Failed to write config file: {}", config_path))?; + + Ok(()) +} diff --git a/crates/kit/src/project/ls.rs b/crates/kit/src/project/ls.rs new file mode 100644 index 0000000..b25f0b0 --- /dev/null +++ b/crates/kit/src/project/ls.rs @@ -0,0 +1,129 @@ +//! 
Implementation of `bcvk project ls` command + +use clap::Parser; +use color_eyre::Result; +use comfy_table::{presets::UTF8_FULL, Table}; + +use crate::libvirt::OutputFormat; + +/// List all project VMs with their metadata +#[derive(Debug, Parser)] +pub struct ProjectLsOpts { + /// Libvirt connection URI (defaults to qemu:///session) + #[clap(long)] + pub connect: Option, + + /// Output format + #[clap(long, value_enum, default_value_t = OutputFormat::Table)] + pub format: OutputFormat, + + /// Show all VMs including stopped ones + #[clap(long, short = 'a')] + pub all: bool, +} + +/// Run the project ls command +pub fn run(opts: ProjectLsOpts) -> Result<()> { + use crate::domain_list::DomainLister; + use color_eyre::eyre::Context; + + // Use libvirt as the source of truth for domain listing + let lister = match opts.connect.as_ref() { + Some(uri) => DomainLister::with_connection(uri.clone()), + None => DomainLister::new(), + }; + + let mut domains = if opts.all { + lister + .list_bootc_domains() + .with_context(|| "Failed to list bootc domains from libvirt")? + } else { + lister + .list_running_bootc_domains() + .with_context(|| "Failed to list running bootc domains from libvirt")? 
+ }; + + // Filter to only project VMs (those with bcvk-project label) + domains.retain(|d| d.labels.contains(&"bcvk-project".to_string())); + + match opts.format { + OutputFormat::Table => { + if domains.is_empty() { + if opts.all { + println!("No project VMs found"); + println!("Tip: Create a project VM with 'bcvk project up'"); + } else { + println!("No running project VMs found"); + println!("Use --all to see stopped VMs or 'bcvk project up' to create one"); + } + return Ok(()); + } + + let mut table = Table::new(); + table.load_preset(UTF8_FULL); + table.set_header(vec![ + "NAME", + "PROJECT DIR", + "IMAGE", + "STATUS", + "MEMORY", + "SSH", + ]); + + for domain in &domains { + let image = match &domain.image { + Some(img) => { + if img.len() > 38 { + format!("{}...", &img[..35]) + } else { + img.clone() + } + } + None => "".to_string(), + }; + let memory = match domain.memory_mb { + Some(mem) => format!("{}MB", mem), + None => "unknown".to_string(), + }; + let ssh = match domain.ssh_port { + Some(port) if domain.has_ssh_key => format!(":{}", port), + Some(port) => format!(":{}*", port), + None => "-".to_string(), + }; + let project_dir = domain + .project_dir + .as_ref() + .map(|p| p.as_str()) + .unwrap_or(""); + table.add_row(vec![ + &domain.name, + project_dir, + &image, + &domain.status_string(), + &memory, + &ssh, + ]); + } + + println!("{}", table); + println!( + "\nFound {} project VM{} (source: libvirt)", + domains.len(), + if domains.len() == 1 { "" } else { "s" } + ); + } + OutputFormat::Json => { + println!( + "{}", + serde_json::to_string_pretty(&domains) + .with_context(|| "Failed to serialize domains as JSON")? + ); + } + OutputFormat::Yaml => { + return Err(color_eyre::eyre::eyre!( + "YAML format is not supported for list command" + )) + } + } + Ok(()) +} diff --git a/crates/kit/src/project/mod.rs b/crates/kit/src/project/mod.rs new file mode 100644 index 0000000..e89037b --- /dev/null +++ b/crates/kit/src/project/mod.rs @@ -0,0 +1,151 @@ +//! 
Project-scoped VM management inspired by Vagrant +//! +//! The `bcvk project` commands provide a streamlined workflow for running bootc VMs +//! scoped to a project directory, with configuration via `.bcvk/config.toml`. + +use camino::{Utf8Path, Utf8PathBuf}; +use clap::Subcommand; +use color_eyre::{eyre::Context as _, Result}; +use std::env; + +pub mod config; +pub mod down; +pub mod init; +pub mod ls; +pub mod rm; +pub mod ssh; +pub mod up; + +use config::ProjectConfig; + +/// Project subcommands +#[derive(Debug, Subcommand)] +pub enum ProjectSubcommands { + /// Initialize project configuration interactively + Init(init::ProjectInitOpts), + + /// Create or start the project VM + Up(up::ProjectUpOpts), + + /// SSH into the project VM + Ssh(ssh::ProjectSshOpts), + + /// Shut down the project VM + Down(down::ProjectDownOpts), + + /// Remove the project VM and its resources + Rm(rm::ProjectRmOpts), + + /// List all project VMs + Ls(ls::ProjectLsOpts), +} + +/// Get the current project directory +/// +/// Uses the current working directory. +pub fn current_project_dir() -> Result { + let cwd = env::current_dir().context("Failed to get current directory")?; + Utf8PathBuf::from_path_buf(cwd) + .map_err(|p| color_eyre::eyre::eyre!("Path is not valid UTF-8: {}", p.display())) +} + +/// Generate a project name from the current directory +/// +/// Priority: +/// 1. Config file `project.name` field +/// 2. 
Directory name (sanitized) +pub fn generate_project_name( + project_dir: &Utf8Path, + config: Option<&ProjectConfig>, +) -> Result { + if let Some(config) = config { + if let Some(project) = &config.project { + if let Some(name) = &project.name { + return Ok(sanitize_name(name)); + } + } + } + + let dir_name = project_dir + .file_name() + .ok_or_else(|| color_eyre::eyre::eyre!("Could not determine directory name"))?; + + Ok(sanitize_name(dir_name)) +} + +/// Sanitize a name for use as a libvirt domain name +/// +/// Replaces characters that are not alphanumeric, hyphen, or underscore with hyphens. +/// Ensures the name starts with an alphanumeric character. +fn sanitize_name(name: &str) -> String { + let mut result = String::with_capacity(name.len()); + let mut chars = name.chars().peekable(); + + // Skip leading non-alphanumeric characters + while let Some(&c) = chars.peek() { + if c.is_alphanumeric() { + break; + } + chars.next(); + } + + // Process remaining characters + for c in chars { + if c.is_alphanumeric() || c == '-' || c == '_' { + result.push(c); + } else { + result.push('-'); + } + } + + // If empty after sanitization, use a default + if result.is_empty() { + result = "bcvk-project".to_string(); + } + + result +} + +/// Generate the project VM name with "bcvk-project-" prefix +pub fn project_vm_name(project_dir: &Utf8Path, config: Option<&ProjectConfig>) -> Result { + let name = generate_project_name(project_dir, config)?; + Ok(format!("bcvk-project-{}", name)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sanitize_name() { + assert_eq!(sanitize_name("my-project"), "my-project"); + assert_eq!(sanitize_name("my_project"), "my_project"); + assert_eq!(sanitize_name("my project"), "my-project"); + assert_eq!(sanitize_name("my/project"), "my-project"); + assert_eq!(sanitize_name("123project"), "123project"); + assert_eq!(sanitize_name("---project"), "project"); + assert_eq!(sanitize_name("project@123"), "project-123"); + 
assert_eq!(sanitize_name("!!!"), "bcvk-project"); + } + + #[test] + fn test_git_origin_name_extraction() { + // Test various git URL formats + let test_cases = vec![ + "https://github.com/user/repo.git", + "git@github.com:user/repo.git", + "/path/to/repo.git", + "https://github.com/user/repo", + ]; + + for url in test_cases { + let name = url + .rsplit('/') + .next() + .unwrap_or(url) + .strip_suffix(".git") + .unwrap_or(url.rsplit('/').next().unwrap_or(url)); + assert_eq!(name, "repo"); + } + } +} diff --git a/crates/kit/src/project/rm.rs b/crates/kit/src/project/rm.rs new file mode 100644 index 0000000..b44e8ff --- /dev/null +++ b/crates/kit/src/project/rm.rs @@ -0,0 +1,53 @@ +//! Implementation of `bcvk project rm` command + +use clap::Parser; +use color_eyre::Result; + +use crate::libvirt::{self, LibvirtOptions}; + +use super::{current_project_dir, project_vm_name}; + +/// Remove the project VM and its resources +/// +/// Permanently removes the VM and its associated disk images. +#[derive(Debug, Parser)] +pub struct ProjectRmOpts { + /// Libvirt connection URI (defaults to qemu:///session) + #[clap(long)] + pub connect: Option, + + /// Force removal without confirmation + #[clap(long, short = 'f')] + pub force: bool, + + /// Remove domain even if it's running + #[clap(long)] + pub stop: bool, +} + +/// Run the project rm command +pub fn run(opts: ProjectRmOpts) -> Result<()> { + // Get current project directory + let project_dir = current_project_dir()?; + + // Load project configuration (optional for rm, just for name generation) + let config = crate::project::config::ProjectConfig::load_from_dir(&project_dir)?; + + // Generate project VM name + let vm_name = project_vm_name(&project_dir, config.as_ref())?; + + // Build libvirt options + let libvirt_opts = LibvirtOptions { + connect: opts.connect, + }; + + // Build libvirt rm options + let rm_opts = libvirt::rm::LibvirtRmOpts { + name: vm_name, + force: opts.force, + stop: opts.stop, + }; + + // Delegate to 
libvirt rm + libvirt::rm::run(&libvirt_opts, rm_opts) +} diff --git a/crates/kit/src/project/ssh.rs b/crates/kit/src/project/ssh.rs new file mode 100644 index 0000000..bad350b --- /dev/null +++ b/crates/kit/src/project/ssh.rs @@ -0,0 +1,177 @@ +//! Implementation of `bcvk project ssh` command + +use clap::Parser; +use color_eyre::Result; + +use crate::libvirt::{self, LibvirtOptions}; + +use super::{current_project_dir, project_vm_name}; + +/// SSH into the project VM +/// +/// Automatically starts the VM if it's stopped. +#[derive(Debug, Parser)] +pub struct ProjectSshOpts { + /// Libvirt connection URI (defaults to qemu:///session) + #[clap(long)] + pub connect: Option, + + /// Run bootc upgrade in two stages (fetch/stage, then apply/reboot) before connecting + #[clap(long, short = 'A')] + pub update: bool, + + /// Command to execute in the VM (if empty, opens interactive shell) + #[clap(allow_hyphen_values = true)] + pub command: Vec, +} + +/// Run the project ssh command +pub fn run(opts: ProjectSshOpts) -> Result<()> { + // Get current project directory + let project_dir = current_project_dir()?; + + // Load project configuration (optional for ssh, just for name generation) + let config = crate::project::config::ProjectConfig::load_from_dir(&project_dir)?; + + // Generate project VM name + let vm_name = project_vm_name(&project_dir, config.as_ref())?; + + // Check VM state and start if needed + let libvirt_opts = LibvirtOptions { + connect: opts.connect.clone(), + }; + + ensure_vm_running(&vm_name, &libvirt_opts)?; + + // If --update is requested, run bootc upgrade in two stages + if opts.update { + // Stage 1: Fetch and prepare the update (without reboot) + println!("Running bootc upgrade (fetching and staging update)..."); + let upgrade_opts = libvirt::ssh::LibvirtSshOpts { + domain_name: vm_name.clone(), + user: "root".to_string(), + command: vec!["bootc".to_string(), "upgrade".to_string()], + strict_host_keys: false, + timeout: 600, // 10 minutes for 
upgrade + log_level: "ERROR".to_string(), + extra_options: vec![], + }; + + // Run the upgrade command and catch errors + match libvirt::ssh::run(&libvirt_opts, upgrade_opts) { + Ok(_) => { + println!("Update staged successfully."); + } + Err(e) => { + eprintln!("Error during bootc upgrade: {}", e); + return Err(e); + } + } + + // Stage 2: Apply the update (will cause reboot) + println!("Applying update and rebooting VM..."); + let apply_opts = libvirt::ssh::LibvirtSshOpts { + domain_name: vm_name.clone(), + user: "root".to_string(), + command: vec![ + "bootc".to_string(), + "upgrade".to_string(), + "--apply".to_string(), + ], + strict_host_keys: false, + timeout: 60, + log_level: "ERROR".to_string(), + extra_options: vec![], + }; + + // This will fail with connection error when VM reboots - that's expected + let _ = libvirt::ssh::run(&libvirt_opts, apply_opts); + println!("VM is rebooting to apply update..."); + + // Wait for VM to come back up + println!("Waiting for VM to restart..."); + std::thread::sleep(std::time::Duration::from_secs(5)); + + // Wait for SSH to be available again + let mut retries = 30; + loop { + let test_opts = libvirt::ssh::LibvirtSshOpts { + domain_name: vm_name.clone(), + user: "root".to_string(), + command: vec!["true".to_string()], + strict_host_keys: false, + timeout: 5, + log_level: "ERROR".to_string(), + extra_options: vec![], + }; + + if libvirt::ssh::run(&libvirt_opts, test_opts).is_ok() { + println!("VM is back online after update."); + break; + } + + retries -= 1; + if retries == 0 { + return Err(color_eyre::eyre::eyre!( + "Timeout waiting for VM to come back online after update" + )); + } + std::thread::sleep(std::time::Duration::from_secs(2)); + } + } + + // SSH into the VM (interactive shell or command execution) + let ssh_opts = libvirt::ssh::LibvirtSshOpts { + domain_name: vm_name.clone(), + user: "root".to_string(), + command: opts.command, + strict_host_keys: false, + timeout: 30, + log_level: "ERROR".to_string(), + 
extra_options: vec![], + }; + + libvirt::ssh::run(&libvirt_opts, ssh_opts) +} + +/// Ensure the VM is running, starting it if necessary +fn ensure_vm_running(name: &str, libvirt_opts: &LibvirtOptions) -> Result<()> { + use crate::domain_list::DomainLister; + + let lister = if let Some(ref uri) = libvirt_opts.connect { + DomainLister::with_connection(uri.clone()) + } else { + DomainLister::new() + }; + let domains = lister.list_bootc_domains()?; + + let domain = domains.iter().find(|d| d.name == name).ok_or_else(|| { + color_eyre::eyre::eyre!( + "Project VM '{}' not found. Run 'bcvk project up' first.", + name + ) + })?; + + match domain.state.as_str() { + "running" => { + // Already running, nothing to do + Ok(()) + } + "shut off" | "paused" => { + println!("Starting project VM '{}'...", name); + let start_opts = libvirt::start::LibvirtStartOpts { + name: name.to_string(), + ssh: false, + }; + libvirt::start::run(libvirt_opts, start_opts) + } + state => { + color_eyre::eyre::bail!( + "Project VM '{}' is in unexpected state '{}'. \ + Please check the VM status manually.", + name, + state + ); + } + } +} diff --git a/crates/kit/src/project/up.rs b/crates/kit/src/project/up.rs new file mode 100644 index 0000000..f1393be --- /dev/null +++ b/crates/kit/src/project/up.rs @@ -0,0 +1,310 @@ +//! Implementation of `bcvk project up` command + +use camino::Utf8Path; +use clap::Parser; +use color_eyre::{eyre::Context as _, Result}; + +use crate::libvirt::{self, run::spawn_lifecycle_monitor, LibvirtOptions}; + +use super::{config::ProjectConfig, current_project_dir, project_vm_name}; + +/// Create or start the project VM +/// +/// Automatically names and manages a VM scoped to the current project directory. +/// Won't recreate if a VM with the same name already exists. 
+#[derive(Debug, Parser)] +pub struct ProjectUpOpts { + /// Libvirt connection URI (defaults to qemu:///session) + #[clap(long)] + pub connect: Option, + + /// Automatically SSH into the VM after creation + #[clap(long)] + pub ssh: bool, + + /// Disable lifecycle binding (don't shutdown VM when parent exits) + #[clap(long, short = 'L')] + pub no_lifecycle_bind: bool, + + /// Enable automatic updates via bootc-fetch-apply-updates every 30s + #[clap(long)] + pub auto_update: bool, + + /// Reset: remove existing VM (force stop and delete) before creating new one + #[clap(long, short = 'R')] + pub reset: bool, +} + +/// Run the project up command +pub fn run(opts: ProjectUpOpts) -> Result<()> { + // Get current project directory + let project_dir = current_project_dir()?; + + // Load project configuration + let config = ProjectConfig::load_from_dir(&project_dir)?.ok_or_else(|| { + color_eyre::eyre::eyre!( + "No .bcvk/config.toml found in current directory.\n\ + Run 'bcvk project init' to create one interactively,\n\ + or create .bcvk/config.toml manually with at least vm.image specified." 
+ ) + })?; + + // Generate project VM name + let vm_name = project_vm_name(&project_dir, Some(&config))?; + + // Build libvirt options + let libvirt_opts = LibvirtOptions { + connect: opts.connect.clone(), + }; + + // If reset flag is set, remove existing VM first + if opts.reset { + let rm_opts = libvirt::rm::LibvirtRmOpts { + name: vm_name.clone(), + force: true, + stop: true, + }; + // Ignore errors from rm (VM might not exist) + let _ = libvirt::rm::run(&libvirt_opts, rm_opts); + } + + let existing_vm = check_vm_exists(&vm_name, &libvirt_opts)?; + + if let Some(state) = existing_vm { + match state.as_str() { + "running" => { + println!("Project VM '{}' is already running", vm_name); + // Spawn lifecycle monitor for already-running VM + if !opts.no_lifecycle_bind { + spawn_lifecycle_monitor(&vm_name, libvirt_opts.connect.as_deref())?; + } + if opts.ssh { + ssh_into_vm(&vm_name, &libvirt_opts)?; + } + return Ok(()); + } + "shut off" | "paused" => { + println!("Starting existing project VM '{}'...", vm_name); + start_vm(&vm_name, &libvirt_opts)?; + // Spawn lifecycle monitor after starting VM + if !opts.no_lifecycle_bind { + spawn_lifecycle_monitor(&vm_name, libvirt_opts.connect.as_deref())?; + } + if opts.ssh { + ssh_into_vm(&vm_name, &libvirt_opts)?; + } + return Ok(()); + } + _ => { + println!( + "Project VM '{}' exists in state '{}', starting...", + vm_name, state + ); + start_vm(&vm_name, &libvirt_opts)?; + // Spawn lifecycle monitor after starting VM + if !opts.no_lifecycle_bind { + spawn_lifecycle_monitor(&vm_name, libvirt_opts.connect.as_deref())?; + } + if opts.ssh { + ssh_into_vm(&vm_name, &libvirt_opts)?; + } + return Ok(()); + } + } + } + + // VM doesn't exist, create it + println!("Creating project VM '{}'...", vm_name); + create_vm( + &vm_name, + &config, + &project_dir, + &libvirt_opts, + opts.ssh, + !opts.no_lifecycle_bind, + opts.auto_update, + )?; + + Ok(()) +} + +/// Check if a VM exists and return its state +fn check_vm_exists(name: &str, 
libvirt_opts: &LibvirtOptions) -> Result> { + use crate::domain_list::DomainLister; + + let lister = if let Some(ref uri) = libvirt_opts.connect { + DomainLister::with_connection(uri.clone()) + } else { + DomainLister::new() + }; + let domains = lister.list_bootc_domains()?; + + for domain in domains { + if domain.name == name { + return Ok(Some(domain.state)); + } + } + + Ok(None) +} + +/// Start an existing VM +fn start_vm(name: &str, libvirt_opts: &LibvirtOptions) -> Result<()> { + let start_opts = libvirt::start::LibvirtStartOpts { + name: name.to_string(), + ssh: false, + }; + + libvirt::start::run(libvirt_opts, start_opts) +} + +/// SSH into a running VM +fn ssh_into_vm(name: &str, libvirt_opts: &LibvirtOptions) -> Result<()> { + let ssh_opts = libvirt::ssh::LibvirtSshOpts { + domain_name: name.to_string(), + user: "root".to_string(), + command: vec![], + strict_host_keys: false, + timeout: 30, + log_level: "ERROR".to_string(), + extra_options: vec![], + }; + + libvirt::ssh::run(libvirt_opts, ssh_opts) +} + +/// Create a new project VM +fn create_vm( + name: &str, + config: &ProjectConfig, + project_dir: &Utf8Path, + libvirt_opts: &LibvirtOptions, + ssh: bool, + lifecycle_bind: bool, + auto_update: bool, +) -> Result<()> { + use crate::install_options::InstallOptions; + use crate::libvirt::run::{FirmwareType, LibvirtRunOpts}; + + // Build run options from project config + // We know vm exists because load_from_dir validates it + let vm = config.vm.as_ref().unwrap(); + + let mut run_opts = LibvirtRunOpts { + image: vm.image.clone(), + name: Some(name.to_string()), + memory: vm.memory.clone(), + cpus: vm.cpu.cpus, + disk_size: vm.disk.disk_size.clone(), + install: InstallOptions::default(), + port_mappings: vec![], + raw_volumes: vec![], + bind_mounts: vec![], + bind_mounts_ro: vec![], + network: vm.net.network.clone(), + detach: !ssh, // Don't detach if we're going to SSH + ssh, + bind_storage_ro: true, + firmware: FirmwareType::UefiSecure, + disable_tpm: 
false, + secure_boot_keys: None, + label: vec!["bcvk-project".to_string()], + transient: false, + lifecycle_bind_parent: lifecycle_bind, + metadata: { + let mut m = std::collections::HashMap::new(); + m.insert("bootc:project-dir".to_string(), project_dir.to_string()); + m + }, + extra_smbios_credentials: vec![], + }; + + run_opts.install.filesystem = Some( + vm.filesystem + .clone() + .unwrap_or_else(|| crate::libvirt::LIBVIRT_DEFAULT_FILESYSTEM.to_string()), + ); + + // Bind project directory to /run/src read-only with auto-mount + // (will fall back to read-write if libvirt doesn't support readonly virtiofs) + run_opts.bind_mounts_ro.push( + format!("{}:/run/src", project_dir.as_str()) + .parse() + .context("Failed to parse project directory bind mount")?, + ); + + // Add configured mounts using bind mount options + for mount in config.mounts.iter().flatten() { + let mount_spec = format!("{}/{}:{}", project_dir.as_str(), mount.host, mount.guest); + let bind_mount = mount_spec + .parse() + .with_context(|| format!("Failed to parse mount spec: {}", mount_spec))?; + if mount.readonly { + run_opts.bind_mounts_ro.push(bind_mount); + } else { + run_opts.bind_mounts.push(bind_mount); + } + } + + // Check for systemd units + let units_dir = config + .systemd_units_dir(project_dir) + .or_else(|| ProjectConfig::default_units_dir(project_dir)); + + if let Some(units_dir) = units_dir { + if units_dir.exists() { + println!("Injecting systemd units from: {}", units_dir); + // TODO: Implement systemd unit injection + // For now, warn that it's not yet implemented + eprintln!( + "Warning: Systemd unit injection is not yet implemented. 
Units in {} will be ignored.", + units_dir + ); + } + } + + // Configure auto-update if requested + if auto_update { + println!("Enabling automatic updates (every 30s)..."); + + // Generate dropin for bootc-fetch-apply-updates.service to use host container storage + let service_dropin_content = "\ +[Service] +Environment=STORAGE_OPTS=additionalimagestore=/run/host-container-storage +"; + let service_dropin_encoded = + data_encoding::BASE64.encode(service_dropin_content.as_bytes()); + let service_dropin_cred = format!( + "io.systemd.credential.binary:systemd.unit-dropin.bootc-fetch-apply-updates.service~bcvk-auto-update={}", + service_dropin_encoded + ); + run_opts.extra_smbios_credentials.push(service_dropin_cred); + + // Generate dropin for bootc-fetch-apply-updates.timer to run every 30s + let timer_dropin_content = "\ +[Timer] +OnBootSec= +OnCalendar= +OnUnitActiveSec= +OnUnitInactiveSec= +OnBootSec=30 +OnUnitActiveSec=30 +"; + let timer_dropin_encoded = data_encoding::BASE64.encode(timer_dropin_content.as_bytes()); + let timer_dropin_cred = format!( + "io.systemd.credential.binary:systemd.unit-dropin.bootc-fetch-apply-updates.timer~bcvk-auto-update={}", + timer_dropin_encoded + ); + run_opts.extra_smbios_credentials.push(timer_dropin_cred); + } + + // Note: STORAGE_OPTS environment configuration is injected in libvirt::run::run() via: + // - systemd.extra-unit (bcvk-storage-opts.service) for /etc/environment (PAM/SSH sessions) + // - tmpfiles.extra for systemd user/system manager configuration + + // Run the VM + libvirt::run::run(libvirt_opts, run_opts) + .with_context(|| format!("Failed to create project VM '{}'", name))?; + + Ok(()) +} diff --git a/crates/kit/src/to_disk.rs b/crates/kit/src/to_disk.rs index 434aff9..492e8b6 100644 --- a/crates/kit/src/to_disk.rs +++ b/crates/kit/src/to_disk.rs @@ -182,11 +182,22 @@ impl ToDiskOpts { /// Generate the complete bootc installation command arguments for SSH execution fn generate_bootc_install_command(&self, 
disk_size: u64) -> Result> { - let source_imgref = format!("containers-storage:{}", self.source_image); + // Auto-detect localhost/ images and use --target-transport containers-storage + let mut install_opts = self.install.clone(); + let source_imgref = if self.source_image.starts_with("localhost/") { + // For localhost/ images, use --target-transport instead of containers-storage: prefix + if install_opts.target_transport.is_none() { + install_opts.target_transport = Some("containers-storage".to_string()); + } + self.source_image.clone() + } else { + // For other images, use containers-storage: prefix (existing behavior) + format!("containers-storage:{}", self.source_image) + }; // Quote each bootc argument individually to prevent shell injection let mut quoted_bootc_args = Vec::new(); - for arg in self.install.to_bootc_args() { + for arg in install_opts.to_bootc_args() { let quoted = shlex::try_quote(&arg) .map_err(|e| eyre!("Failed to quote bootc argument '{}': {}", arg, e))?; quoted_bootc_args.push(quoted.to_string()); @@ -514,7 +525,7 @@ fn write_disk_metadata( .with_context(|| format!("Failed to open disk file {}", target_disk))?; metadata - .write_to_file(&file) + .write_to_file(&file, install_options) .with_context(|| "Failed to write metadata to disk file")?; debug!( @@ -570,4 +581,68 @@ mod tests { Ok(()) } + + #[test] + fn test_localhost_image_detection() -> Result<()> { + // Test localhost/ image uses --target-transport + let opts_localhost = ToDiskOpts { + source_image: "localhost/my-image:latest".to_string(), + target_disk: "/tmp/test.img".into(), + install: InstallOptions::default(), + additional: ToDiskAdditionalOpts { + disk_size: Some("10G".to_string()), + ..Default::default() + }, + }; + + let cmd = opts_localhost.generate_bootc_install_command(10 * 1024 * 1024 * 1024)?; + let script = &cmd[2]; // The script is the third argument after /bin/bash -c + + // Should use plain image name (not containers-storage: prefix) + assert!( + 
script.contains("localhost/my-image:latest"), + "Script should contain localhost/my-image:latest" + ); + // Should NOT have containers-storage: prefix for localhost/ images + assert!( + !script.contains("containers-storage:localhost/"), + "Script should not contain containers-storage: prefix for localhost/ images" + ); + // Should include --target-transport in bootc args + assert!( + script.contains("--target-transport"), + "Script should contain --target-transport flag" + ); + assert!( + script.contains("containers-storage"), + "Script should contain containers-storage as transport value" + ); + + // Test non-localhost image uses containers-storage: prefix (existing behavior) + let opts_regular = ToDiskOpts { + source_image: "quay.io/my-image:latest".to_string(), + target_disk: "/tmp/test.img".into(), + install: InstallOptions::default(), + additional: ToDiskAdditionalOpts { + disk_size: Some("10G".to_string()), + ..Default::default() + }, + }; + + let cmd_regular = opts_regular.generate_bootc_install_command(10 * 1024 * 1024 * 1024)?; + let script_regular = &cmd_regular[2]; + + // Should use containers-storage: prefix + assert!( + script_regular.contains("containers-storage:quay.io/my-image:latest"), + "Script should contain containers-storage:quay.io/my-image:latest" + ); + // Should NOT include --target-transport for non-localhost images + assert!( + !script_regular.contains("--target-transport"), + "Script should not contain --target-transport flag for non-localhost images" + ); + + Ok(()) + } } diff --git a/docs/src/man/bcvk-ephemeral.md b/docs/src/man/bcvk-ephemeral.md index 123eb26..d7c68ee 100644 --- a/docs/src/man/bcvk-ephemeral.md +++ b/docs/src/man/bcvk-ephemeral.md @@ -6,6 +6,8 @@ bcvk-ephemeral - Manage ephemeral VMs for bootc containers **bcvk ephemeral** [*OPTIONS*] +**bcvk e** [*OPTIONS*] + # DESCRIPTION Manage ephemeral VMs for bootc containers diff --git a/docs/src/man/bcvk-libvirt-run.md b/docs/src/man/bcvk-libvirt-run.md index 1db781f..2c1278b 
100644 --- a/docs/src/man/bcvk-libvirt-run.md +++ b/docs/src/man/bcvk-libvirt-run.md @@ -122,6 +122,10 @@ Run a bootable container as a persistent VM Create a transient VM that disappears on shutdown/reboot +**--lifecycle-bind-parent** + + Bind VM lifecycle to parent process (shutdown VM when parent exits) + # EXAMPLES diff --git a/docs/src/man/bcvk-libvirt.md b/docs/src/man/bcvk-libvirt.md index a6877d5..c738850 100644 --- a/docs/src/man/bcvk-libvirt.md +++ b/docs/src/man/bcvk-libvirt.md @@ -6,6 +6,8 @@ bcvk-libvirt - Manage libvirt integration for bootc containers **bcvk libvirt** \[**-h**\|**\--help**\] \<*subcommands*\> +**bcvk v** \[**-h**\|**\--help**\] \<*subcommands*\> + # DESCRIPTION Comprehensive libvirt integration with subcommands for uploading disk images, diff --git a/docs/src/man/bcvk-project-down.md b/docs/src/man/bcvk-project-down.md new file mode 100644 index 0000000..2be85ce --- /dev/null +++ b/docs/src/man/bcvk-project-down.md @@ -0,0 +1,50 @@ +# NAME + +bcvk-project-down - Shut down the project VM + +# SYNOPSIS + +**bcvk project down** [*OPTIONS*] + +# DESCRIPTION + +Shut down the project VM + +# OPTIONS + + +**--connect**=*CONNECT* + + Libvirt connection URI (defaults to qemu:///session) + +**--remove** + + Remove the VM after shutting it down + +**--force** + + + + + +# EXAMPLES + +Shut down the project VM: + + bcvk project down + +The VM is stopped but not removed. You can start it again with `bcvk project up`. + +Shut down and remove the project VM: + + bcvk project down --remove + +This completely deletes the VM, freeing up all associated storage. 
+ +# SEE ALSO + +**bcvk**(8), **bcvk-project**(8), **bcvk-project-up**(8), **bcvk-project-ssh**(8) + +# VERSION + + diff --git a/docs/src/man/bcvk-project-init.md b/docs/src/man/bcvk-project-init.md new file mode 100644 index 0000000..d997812 --- /dev/null +++ b/docs/src/man/bcvk-project-init.md @@ -0,0 +1,62 @@ +# NAME + +bcvk-project-init - Initialize project configuration interactively + +# SYNOPSIS + +**bcvk project init** [*OPTIONS*] + +# DESCRIPTION + +Initialize project configuration interactively by creating a `.bcvk/config.toml` file +in the current directory. The wizard will guide you through selecting a bootc container +image and optionally setting a custom project name. + +The configuration file is minimal by default, containing only the required `vm.image` +field and any custom settings you specify. Additional configuration such as memory, +CPU, disk size, and volume mounts can be added manually to the generated file. + +# OPTIONS + + +**-f**, **--force** + + Overwrite existing configuration if it exists + + + +# EXAMPLES + +Initialize a new project in the current directory: + + cd /path/to/my-project + bcvk project init + +The wizard will prompt you to select from available bootc images or enter a custom image name. + +Overwrite existing configuration: + + bcvk project init --force + +This is useful if you want to reset your project configuration or if the config file is corrupted. 
+ +Example generated configuration: + + [vm] + image = "quay.io/fedora/fedora-bootc:42" + +Or with a custom project name: + + [project] + name = "my-custom-name" + + [vm] + image = "quay.io/centos-bootc/centos-bootc:stream10" + +# SEE ALSO + +**bcvk**(8), **bcvk-project**(8), **bcvk-project-up**(8) + +# VERSION + + diff --git a/docs/src/man/bcvk-project-ls.md b/docs/src/man/bcvk-project-ls.md new file mode 100644 index 0000000..5149809 --- /dev/null +++ b/docs/src/man/bcvk-project-ls.md @@ -0,0 +1,47 @@ +# NAME + +bcvk-project-ls - List all project VMs + +# SYNOPSIS + +**bcvk project ls** [*OPTIONS*] + +# DESCRIPTION + +List all project VMs + +# OPTIONS + + +**--connect**=*CONNECT* + + Libvirt connection URI (defaults to qemu:///session) + +**--format**=*FORMAT* + + Output format + + Possible values: + - table + - json + - yaml + + Default: table + +**-a**, **--all** + + Show all VMs including stopped ones + + + +# EXAMPLES + +TODO: Add practical examples showing how to use this command. + +# SEE ALSO + +**bcvk**(8) + +# VERSION + + diff --git a/docs/src/man/bcvk-project-rm.md b/docs/src/man/bcvk-project-rm.md new file mode 100644 index 0000000..d82b90e --- /dev/null +++ b/docs/src/man/bcvk-project-rm.md @@ -0,0 +1,61 @@ +# NAME + +bcvk-project-rm - Remove the project VM and its resources + +# SYNOPSIS + +**bcvk project rm** [*OPTIONS*] + +# DESCRIPTION + +Permanently removes the project VM and its associated disk images. This is equivalent +to `bcvk project down --remove` but provides more granular control with options for +forcing removal and handling running VMs. + +By default, this command will ask for confirmation before removing the VM. Use `--force` +to skip the confirmation prompt, which is useful for automated scripts. 
+ +# OPTIONS + + +**--connect**=*CONNECT* + + Libvirt connection URI (defaults to qemu:///session) + +**-f**, **--force** + + Force removal without confirmation + +**--stop** + + Remove domain even if it's running + + + +# EXAMPLES + +Remove a stopped project VM: + + bcvk project rm + +This will prompt for confirmation before removing the VM. + +Force removal without confirmation: + + bcvk project rm --force + +Useful for automated cleanup scripts. + +Remove a running VM: + + bcvk project rm --stop --force + +This will stop the VM if it's running, then remove it without prompting for confirmation. + +# SEE ALSO + +**bcvk**(8), **bcvk-project**(8), **bcvk-project-down**(8) + +# VERSION + + diff --git a/docs/src/man/bcvk-project-ssh.md b/docs/src/man/bcvk-project-ssh.md new file mode 100644 index 0000000..833be3d --- /dev/null +++ b/docs/src/man/bcvk-project-ssh.md @@ -0,0 +1,109 @@ +# NAME + +bcvk-project-ssh - SSH into the project VM + +# SYNOPSIS + +**bcvk project ssh** [*OPTIONS*] + +# DESCRIPTION + +SSH into the project VM. + +Automatically starts the VM if it's stopped. + +## Manual upgrade trigger + +The `--update` (or `-A`) flag performs a two-stage bootc upgrade before establishing +the SSH connection: + +1. First, it runs `bootc upgrade` to fetch and stage the update. Any errors during + this phase are caught and reported before the VM reboots. + +2. If staging succeeds, it runs `bootc upgrade --apply` to apply the update and + reboot the VM. The command waits for the VM to come back online after reboot. + +This allows you to manually trigger an immediate upgrade of the bootc deployment +in your VM, useful when you've built a new version of your container image and +want to deploy it right away without waiting for automatic updates. + +The upgrade command has a 10-minute timeout and streams its output to the +console in real-time, so you can monitor the upgrade progress. 
After the +VM reboots and comes back online, the SSH connection is automatically established +(either opening an interactive shell or running the specified command). + +# OPTIONS + + +**COMMAND** + + Command to execute in the VM (if empty, opens interactive shell) + +**--connect**=*CONNECT* + + Libvirt connection URI (defaults to qemu:///session) + +**-A**, **--update** + + Run bootc upgrade in two stages (fetch/stage, then apply/reboot) before connecting + + + +# EXAMPLES + +Open an interactive SSH session: + + bcvk project ssh + +If the VM is stopped, it will be started automatically before connecting. + +Run a single command in the VM: + + bcvk project ssh ls -la /workspace + +Execute a command with multiple arguments: + + bcvk project ssh -- systemctl status myservice + +The `--` separator ensures all following arguments are passed to the VM command. + +Trigger a bootc upgrade before connecting: + + bcvk project ssh --update + +Or using the short flag: + + bcvk project ssh -A + +This runs `bootc upgrade --apply` and then opens an interactive shell after +the upgrade completes. + +Trigger upgrade and run a command: + + bcvk project ssh --update bootc status + +This upgrades the deployment and then immediately checks the bootc status to +verify the new deployment. + +Example workflow for manual upgrade: + + # Rebuild your container image + podman build -t localhost/my-app:dev . 
+ + # Immediately apply the update and reconnect + bcvk project ssh -A + + # Verify the deployment + bootc status + +You can also check the upgrade logs: + + bcvk project ssh -A journalctl -u bootc-fetch-apply-updates -n 50 + +# SEE ALSO + +**bcvk**(8), **bcvk-project**(8), **bcvk-project-up**(8), **bcvk-project-down**(8) + +# VERSION + + diff --git a/docs/src/man/bcvk-project-up.md b/docs/src/man/bcvk-project-up.md new file mode 100644 index 0000000..9ff268d --- /dev/null +++ b/docs/src/man/bcvk-project-up.md @@ -0,0 +1,115 @@ +# NAME + +bcvk-project-up - Create or start the project VM + +# SYNOPSIS + +**bcvk project up** [*OPTIONS*] + +# DESCRIPTION + +Create or start the project VM. + +Automatically names and manages a VM scoped to the current project directory. +Won't recreate if a VM with the same name already exists. + +## Lifecycle binding to parent process + +The expectation is that this tool is invoked from a persistent interactive +shell when working on a project. By default, a "lifecycle-bind" child process +will be run in the background which monitors the parent, and when it exits +then the VM will be shut down. This provides convenient semantics for users +of IDEs and similar tools. Use `--no-lifecycle-bind` to disable. + +## Automatic updates + +The `--auto-update` flag enables rapid iteration during development by +configuring the VM to automatically check for and apply bootc updates every +30 seconds. This is accomplished by injecting systemd unit dropins that: + +- Configure `bootc-fetch-apply-updates.service` to use the host's container + storage via virtiofs (mounted at `/run/host-container-storage`) +- Override `bootc-fetch-apply-updates.timer` to run every 30 seconds instead + of the default interval + +This allows you to build a new version of your container image on the host, +and have it automatically deployed to the VM within 30 seconds - perfect for +fast development iteration cycles. 
+ +# OPTIONS + + +**--connect**=*CONNECT* + + Libvirt connection URI (defaults to qemu:///session) + +**--ssh** + + Automatically SSH into the VM after creation + +**-L**, **--no-lifecycle-bind** + + Disable lifecycle binding (don't shutdown VM when parent exits) + +**--auto-update** + + Enable automatic updates via bootc-fetch-apply-updates every 30s + +**-R**, **--reset** + + Reset: remove existing VM (force stop and delete) before creating new one + + + +# EXAMPLES + +Start a project VM from existing configuration: + + bcvk project up + +This requires a `.bcvk/config.toml` file in the current directory. If you don't have one yet, +run `bcvk project init` to create it interactively. + +Minimum required configuration: + + [vm] + image = "quay.io/fedora/fedora-bootc:42" + +The project directory is automatically mounted at `/run/src` in the VM as read-only. + +Start and immediately SSH into the VM: + + bcvk project up --ssh + +Disable automatic lifecycle binding: + + bcvk project up --no-lifecycle-bind + +This keeps the VM running even after the parent process exits. + +Start with automatic updates enabled: + + bcvk project up --auto-update + +This enables automatic bootc updates every 30 seconds, ideal for development +workflows where you're frequently rebuilding your container image. + +Example development workflow with auto-update: + + # Start VM with auto-update enabled + bcvk project up --auto-update --ssh + + # In another terminal, rebuild your image + podman build -t localhost/my-app:dev . 
+ + # The VM will detect and apply the update within 30 seconds + # You can watch it happen: + journalctl -f -u bootc-fetch-apply-updates + +# SEE ALSO + +**bcvk**(8), **bcvk-project**(8), **bcvk-project-down**(8), **bcvk-project-ssh**(8) + +# VERSION + + diff --git a/docs/src/man/bcvk-project.md b/docs/src/man/bcvk-project.md new file mode 100644 index 0000000..ff2dc23 --- /dev/null +++ b/docs/src/man/bcvk-project.md @@ -0,0 +1,88 @@ +# NAME + +bcvk-project - Project-scoped VM management +# SYNOPSIS + +**bcvk project** [*OPTIONS*] + +**bcvk p** [*OPTIONS*] + +# DESCRIPTION + +Project-scoped VM management. A "project" is typically +a git repository, defining a bootc-based system. + +Often one might use this tool as a way to conveniently +test an operating system (variant) locally before deployment. + +## Similarity to Vagrant + +Similar to Vagrant, `bcvk project` manages development VMs on a per-directory +basis. The key differences are that bcvk uses bootc container images instead +of traditional VM images, and all configuration is stored in `.bcvk/config.toml` +rather than a Vagrantfile. + +## Development workflow + +The project commands are designed for rapid iteration when developing bootc-based +systems: + +1. **Initial setup**: `bcvk project init` creates a `.bcvk/config.toml` configuration +2. **Start VM**: `bcvk project up` creates and starts the VM +3. **Make changes**: Edit your Containerfile and rebuild the image +4. **Test updates**: Either wait for automatic updates (with `--auto-update`) or + manually trigger with `bcvk project ssh -A` + +For development workflows, consider using `bcvk project up --auto-update` to +enable automatic deployment of changes every 30 seconds, or use `bcvk project ssh -A` +to manually trigger immediate upgrades when you've rebuilt your image. 
+ + + + +# EXAMPLES + +Initialize a new project with the interactive wizard: + + cd /path/to/my-project + bcvk project init + +Start an existing project VM: + + bcvk project up + +SSH into the project VM: + + bcvk project ssh + +Shut down the project VM: + + bcvk project down + +Remove the project VM entirely: + + bcvk project rm + +Complete development workflow with automatic updates: + + # Initialize project + bcvk project init + + # Start VM with auto-update enabled + bcvk project up --auto-update --ssh + + # In another terminal, make changes and rebuild + vim Containerfile + podman build -t localhost/my-app:dev . + + # Changes are automatically applied within 30 seconds + # Or manually trigger immediate upgrade: + bcvk project ssh -A + +# SEE ALSO + +**bcvk**(8), **bcvk-project-init**(8), **bcvk-project-up**(8), **bcvk-project-down**(8), **bcvk-project-ssh**(8), **bcvk-project-rm**(8) + +# VERSION + + diff --git a/docs/src/man/bcvk.md b/docs/src/man/bcvk.md index 9894bf8..db80ba1 100644 --- a/docs/src/man/bcvk.md +++ b/docs/src/man/bcvk.md @@ -15,6 +15,7 @@ a libvirt virtual machine, and connect with `ssh`. The toolkit includes commands for: +- Project-scoped VM management with Vagrant-like workflow - Running ephemeral VMs for testing container images - Installing bootc containers to persistent disk images - Managing libvirt integration and VM lifecycle @@ -30,6 +31,10 @@ bcvk-hostexec(8) : Execute commands on the host system from within containers +bcvk-project(8) + +: Project-scoped VM management (Vagrant-like workflow) + bcvk-images(8) : Manage and inspect bootc container images