diff --git a/crates/lib/src/bootc_kargs.rs b/crates/lib/src/bootc_kargs.rs index 028b6a5d9..ec1be4e6c 100644 --- a/crates/lib/src/bootc_kargs.rs +++ b/crates/lib/src/bootc_kargs.rs @@ -116,7 +116,8 @@ pub(crate) fn get_kargs( fetched: &ImageState, ) -> Result> { let cancellable = gio::Cancellable::NONE; - let repo = &sysroot.repo(); + let ostree = sysroot.get_ostree()?; + let repo = &ostree.repo(); let mut kargs = vec![]; let sys_arch = std::env::consts::ARCH; @@ -129,7 +130,7 @@ pub(crate) fn get_kargs( }; // Get the kargs in kargs.d of the merge - let merge_root = &crate::utils::deployment_fd(sysroot, merge_deployment)?; + let merge_root = &crate::utils::deployment_fd(ostree, merge_deployment)?; let existing_kargs = get_kargs_in_root(merge_root, sys_arch)?; // Get the kargs in kargs.d of the pending image diff --git a/crates/lib/src/boundimage.rs b/crates/lib/src/boundimage.rs index ea26bed29..9eba4517c 100644 --- a/crates/lib/src/boundimage.rs +++ b/crates/lib/src/boundimage.rs @@ -13,7 +13,7 @@ use fn_error_context::context; use ostree_ext::containers_image_proxy; use ostree_ext::ostree::Deployment; -use crate::imgstorage::PullMode; +use crate::podstorage::{CStorage, PullMode}; use crate::store::Storage; /// The path in a root for bound images; this directory should only contain @@ -39,7 +39,7 @@ pub(crate) struct ResolvedBoundImage { /// Given a deployment, pull all container images it references. 
pub(crate) async fn pull_bound_images(sysroot: &Storage, deployment: &Deployment) -> Result<()> { - let bound_images = query_bound_images_for_deployment(sysroot, deployment)?; + let bound_images = query_bound_images_for_deployment(sysroot.get_ostree()?, deployment)?; pull_images(sysroot, bound_images).await } @@ -158,7 +158,7 @@ pub(crate) async fn pull_images( #[context("Pulling bound images")] pub(crate) async fn pull_images_impl( - imgstore: &crate::imgstorage::Storage, + imgstore: &CStorage, bound_images: Vec, ) -> Result<()> { let n = bound_images.len(); diff --git a/crates/lib/src/cli.rs b/crates/lib/src/cli.rs index a5c269c4b..7b19bf7de 100644 --- a/crates/lib/src/cli.rs +++ b/crates/lib/src/cli.rs @@ -24,6 +24,7 @@ use ostree_ext::container as ostree_container; use ostree_ext::container_utils::ostree_booted; use ostree_ext::keyfileext::KeyFileExt; use ostree_ext::ostree; +use ostree_ext::sysroot::SysrootLock; use schemars::schema_for; use serde::{Deserialize, Serialize}; @@ -778,13 +779,9 @@ fn has_soft_reboot_capability(deployment: Option<&crate::spec::BootEntry>) -> bo /// Prepare a soft reboot for the given deployment #[context("Preparing soft reboot")] -fn prepare_soft_reboot( - sysroot: &crate::store::Storage, - deployment: &ostree::Deployment, -) -> Result<()> { +fn prepare_soft_reboot(sysroot: &SysrootLock, deployment: &ostree::Deployment) -> Result<()> { let cancellable = ostree::gio::Cancellable::NONE; sysroot - .sysroot .deployment_set_soft_reboot(deployment, false, cancellable) .context("Failed to prepare soft-reboot")?; Ok(()) @@ -829,7 +826,7 @@ where /// Handle soft reboot for staged deployments (used by upgrade and switch) #[context("Handling staged soft reboot")] fn handle_staged_soft_reboot( - sysroot: &crate::store::Storage, + sysroot: &SysrootLock, soft_reboot_mode: Option, host: &crate::spec::Host, ) -> Result<()> { @@ -843,7 +840,7 @@ fn handle_staged_soft_reboot( /// Perform a soft reboot for a staged deployment #[context("Soft reboot 
staged deployment")] -fn soft_reboot_staged(sysroot: &crate::store::Storage) -> Result<()> { +fn soft_reboot_staged(sysroot: &SysrootLock) -> Result<()> { println!("Staged deployment is soft-reboot capable, preparing for soft-reboot..."); let deployments_list = sysroot.deployments(); @@ -858,7 +855,7 @@ fn soft_reboot_staged(sysroot: &crate::store::Storage) -> Result<()> { /// Perform a soft reboot for a rollback deployment #[context("Soft reboot rollback deployment")] -fn soft_reboot_rollback(sysroot: &crate::store::Storage) -> Result<()> { +fn soft_reboot_rollback(sysroot: &SysrootLock) -> Result<()> { println!("Rollback deployment is soft-reboot capable, preparing for soft-reboot..."); let deployments_list = sysroot.deployments(); @@ -910,9 +907,9 @@ fn prepare_for_write() -> Result<()> { #[context("Upgrading")] async fn upgrade(opts: UpgradeOpts) -> Result<()> { let sysroot = &get_storage().await?; - let repo = &sysroot.repo(); - let (booted_deployment, _deployments, host) = - crate::status::get_status_require_booted(sysroot)?; + let ostree = sysroot.get_ostree()?; + let repo = &ostree.repo(); + let (booted_deployment, _deployments, host) = crate::status::get_status_require_booted(ostree)?; let imgref = host.spec.image.as_ref(); let prog: ProgressWriter = opts.progress.try_into()?; @@ -988,7 +985,7 @@ async fn upgrade(opts: UpgradeOpts) -> Result<()> { .unwrap_or_default(); if staged_unchanged { println!("Staged update present, not changed."); - handle_staged_soft_reboot(sysroot, opts.soft_reboot, &host)?; + handle_staged_soft_reboot(ostree, opts.soft_reboot, &host)?; if opts.apply { crate::reboot::reboot()?; } @@ -1013,8 +1010,8 @@ async fn upgrade(opts: UpgradeOpts) -> Result<()> { if opts.soft_reboot.is_some() { // At this point we have new staged deployment and the host definition has changed. // We need the updated host status before we check if we can prepare the soft-reboot. 
- let updated_host = crate::status::get_status(sysroot, Some(&booted_deployment))?.1; - handle_staged_soft_reboot(sysroot, opts.soft_reboot, &updated_host)?; + let updated_host = crate::status::get_status(ostree, Some(&booted_deployment))?.1; + handle_staged_soft_reboot(ostree, opts.soft_reboot, &updated_host)?; } if opts.apply { @@ -1058,9 +1055,9 @@ async fn switch(opts: SwitchOpts) -> Result<()> { let cancellable = gio::Cancellable::NONE; let sysroot = &get_storage().await?; - let repo = &sysroot.repo(); - let (booted_deployment, _deployments, host) = - crate::status::get_status_require_booted(sysroot)?; + let ostree = sysroot.get_ostree()?; + let repo = &ostree.repo(); + let (booted_deployment, _deployments, host) = crate::status::get_status_require_booted(ostree)?; let new_spec = { let mut new_spec = host.spec.clone(); @@ -1095,8 +1092,8 @@ async fn switch(opts: SwitchOpts) -> Result<()> { if opts.soft_reboot.is_some() { // At this point we have staged the deployment and the host definition has changed. // We need the updated host status before we check if we can prepare the soft-reboot. 
- let updated_host = crate::status::get_status(sysroot, Some(&booted_deployment))?.1; - handle_staged_soft_reboot(sysroot, opts.soft_reboot, &updated_host)?; + let updated_host = crate::status::get_status(ostree, Some(&booted_deployment))?.1; + handle_staged_soft_reboot(ostree, opts.soft_reboot, &updated_host)?; } if opts.apply { @@ -1110,17 +1107,18 @@ async fn switch(opts: SwitchOpts) -> Result<()> { #[context("Rollback")] async fn rollback(opts: RollbackOpts) -> Result<()> { let sysroot = &get_storage().await?; + let ostree = sysroot.get_ostree()?; crate::deploy::rollback(sysroot).await?; if opts.soft_reboot.is_some() { // Get status of rollback deployment to check soft-reboot capability - let host = crate::status::get_status_require_booted(sysroot)?.2; + let host = crate::status::get_status_require_booted(ostree)?.2; handle_soft_reboot( opts.soft_reboot, host.status.rollback.as_ref(), "rollback", - || soft_reboot_rollback(sysroot), + || soft_reboot_rollback(ostree), )?; } @@ -1135,10 +1133,10 @@ async fn rollback(opts: RollbackOpts) -> Result<()> { #[context("Editing spec")] async fn edit(opts: EditOpts) -> Result<()> { let sysroot = &get_storage().await?; - let repo = &sysroot.repo(); + let ostree = sysroot.get_ostree()?; + let repo = &ostree.repo(); - let (booted_deployment, _deployments, host) = - crate::status::get_status_require_booted(sysroot)?; + let (booted_deployment, _deployments, host) = crate::status::get_status_require_booted(ostree)?; let new_host: Host = if let Some(filename) = opts.filename { let mut r = std::io::BufReader::new(std::fs::File::open(filename)?); serde_yaml::from_reader(&mut r)? 
diff --git a/crates/lib/src/deploy.rs b/crates/lib/src/deploy.rs index b003e70f7..e10500ab9 100644 --- a/crates/lib/src/deploy.rs +++ b/crates/lib/src/deploy.rs @@ -304,10 +304,11 @@ async fn handle_layer_progress_print( /// Gather all bound images in all deployments, then prune the image store, /// using the gathered images as the roots (that will not be GC'd). pub(crate) async fn prune_container_store(sysroot: &Storage) -> Result<()> { - let deployments = sysroot.deployments(); + let ostree = sysroot.get_ostree()?; + let deployments = ostree.deployments(); let mut all_bound_images = Vec::new(); for deployment in deployments { - let bound = crate::boundimage::query_bound_images_for_deployment(sysroot, &deployment)?; + let bound = crate::boundimage::query_bound_images_for_deployment(ostree, &deployment)?; all_bound_images.extend(bound.into_iter()); } // Convert to a hashset of just the image names @@ -463,11 +464,11 @@ pub(crate) async fn cleanup(sysroot: &Storage) -> Result<()> { let bound_prune = prune_container_store(sysroot); // We create clones (just atomic reference bumps) here to move to the thread. - let repo = sysroot.repo(); - let sysroot = sysroot.sysroot.clone(); + let ostree = sysroot.get_ostree_cloned()?; + let repo = ostree.repo(); let repo_prune = ostree_ext::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| { - let locked_sysroot = &SysrootLock::from_assumed_locked(&sysroot); + let locked_sysroot = &SysrootLock::from_assumed_locked(&ostree); let cancellable = Some(cancellable); let repo = &repo; let txn = repo.auto_transaction(cancellable)?; @@ -488,7 +489,7 @@ pub(crate) async fn cleanup(sysroot: &Storage) -> Result<()> { // Then, for each deployment which is derived (e.g. has configmaps) we synthesize // a base ref to ensure that it's not GC'd. 
- for (i, deployment) in sysroot.deployments().into_iter().enumerate() { + for (i, deployment) in ostree.deployments().into_iter().enumerate() { let commit = deployment.csum(); if let Some(base) = get_base_commit(repo, &commit)? { repo.transaction_set_refspec(&format!("{BASE_IMAGE_PREFIX}/{i}"), Some(&base)); @@ -543,7 +544,7 @@ async fn deploy( None }; // Clone all the things to move to worker thread - let sysroot_clone = sysroot.sysroot.clone(); + let ostree = sysroot.get_ostree_cloned()?; // ostree::Deployment is incorrectly !Send 😢 so convert it to an integer let merge_deployment = merge_deployment.map(|d| d.index() as usize); let stateroot = stateroot.to_string(); @@ -553,7 +554,7 @@ async fn deploy( let r = async_task_with_spinner( "Deploying", spawn_blocking_cancellable_flatten(move |cancellable| -> Result<_> { - let sysroot = sysroot_clone; + let ostree = ostree; let stateroot = Some(stateroot); let mut opts = ostree::SysrootDeployTreeOpts::default(); @@ -565,11 +566,11 @@ async fn deploy( if let Some(kargs) = override_kargs.as_deref() { opts.override_kernel_argv = Some(&kargs); } - let deployments = sysroot.deployments(); + let deployments = ostree.deployments(); let merge_deployment = merge_deployment.map(|m| &deployments[m]); let origin = glib::KeyFile::new(); origin.load_from_data(&origin_data, glib::KeyFileFlags::NONE)?; - let d = sysroot.stage_tree_with_options( + let d = ostree.stage_tree_with_options( stateroot.as_deref(), &ostree_commit, Some(&origin), @@ -582,7 +583,8 @@ async fn deploy( ) .await?; // SAFETY: We must have a staged deployment - let staged = sysroot.staged_deployment().unwrap(); + let ostree = sysroot.get_ostree()?; + let staged = ostree.staged_deployment().unwrap(); assert_eq!(staged.index(), r); Ok(staged) } @@ -608,6 +610,7 @@ pub(crate) async fn stage( spec: &RequiredHostSpec<'_>, prog: ProgressWriter, ) -> Result<()> { + let ostree = sysroot.get_ostree()?; let mut subtask = SubTaskStep { subtask: "merging".into(), description: 
"Merging Image".into(), @@ -629,7 +632,7 @@ pub(crate) async fn stage( .collect(), }) .await; - let merge_deployment = sysroot.merge_deployment(Some(stateroot)); + let merge_deployment = ostree.merge_deployment(Some(stateroot)); subtask.completed = true; subtasks.push(subtask.clone()); @@ -740,8 +743,8 @@ pub(crate) async fn stage( /// Implementation of rollback functionality pub(crate) async fn rollback(sysroot: &Storage) -> Result<()> { const ROLLBACK_JOURNAL_ID: &str = "26f3b1eb24464d12aa5e7b544a6b5468"; - let repo = &sysroot.repo(); - let (booted_deployment, deployments, host) = crate::status::get_status_require_booted(sysroot)?; + let ostree = sysroot.get_ostree()?; + let (booted_deployment, deployments, host) = crate::status::get_status_require_booted(ostree)?; let new_spec = { let mut new_spec = host.spec.clone(); @@ -749,6 +752,8 @@ pub(crate) async fn rollback(sysroot: &Storage) -> Result<()> { new_spec }; + let repo = &ostree.repo(); + // Just to be sure host.spec.verify_transition(&new_spec)?; @@ -788,7 +793,7 @@ pub(crate) async fn rollback(sysroot: &Storage) -> Result<()> { .chain(deployments.other) .collect::>(); tracing::debug!("Writing new deployments: {new_deployments:?}"); - sysroot.write_deployments(&new_deployments, gio::Cancellable::NONE)?; + ostree.write_deployments(&new_deployments, gio::Cancellable::NONE)?; if reverting { println!("Next boot: current deployment"); } else { diff --git a/crates/lib/src/fsck.rs b/crates/lib/src/fsck.rs index fc1ffb210..973923e79 100644 --- a/crates/lib/src/fsck.rs +++ b/crates/lib/src/fsck.rs @@ -106,8 +106,9 @@ static CHECK_RESOLVCONF: FsckCheck = /// But at the current time fsck is an experimental feature that we should only be running /// in our CI. fn check_resolvconf(storage: &Storage) -> FsckResult { + let ostree = storage.get_ostree()?; // For now we only check the booted deployment. 
- if storage.booted_deployment().is_none() { + if ostree.booted_deployment().is_none() { return fsck_ok(); } // Read usr/etc/resolv.conf directly. @@ -240,7 +241,8 @@ fn check_fsverity(storage: &Storage) -> Pin } async fn check_fsverity_inner(storage: &Storage) -> FsckResult { - let repo = &storage.repo(); + let ostree = storage.get_ostree()?; + let repo = &ostree.repo(); let verity_state = ostree_ext::fsverity::is_verity_enabled(repo)?; tracing::debug!( "verity: expected={:?} found={:?}", @@ -249,7 +251,7 @@ async fn check_fsverity_inner(storage: &Storage) -> FsckResult { ); let verity_found_state = - verity_state_of_all_objects(&storage.repo(), verity_state.desired == Tristate::Enabled) + verity_state_of_all_objects(&ostree.repo(), verity_state.desired == Tristate::Enabled) .await?; let Some((missing, rest)) = collect_until( verity_found_state.missing.iter(), diff --git a/crates/lib/src/image.rs b/crates/lib/src/image.rs index 02556aeee..ad984ed6f 100644 --- a/crates/lib/src/image.rs +++ b/crates/lib/src/image.rs @@ -14,7 +14,7 @@ use serde::Serialize; use crate::{ boundimage::query_bound_images, cli::{ImageListFormat, ImageListType}, - imgstorage::ensure_floating_c_storage_initialized, + podstorage::{ensure_floating_c_storage_initialized, CStorage}, }; /// The name of the image we push to containers-storage if nothing is specified. 
@@ -42,7 +42,8 @@ struct ImageOutput { #[context("Listing host images")] fn list_host_images(sysroot: &crate::store::Storage) -> Result> { - let repo = sysroot.repo(); + let ostree = sysroot.get_ostree()?; + let repo = ostree.repo(); let images = ostree_ext::container::store::list_images(&repo).context("Querying images")?; Ok(images @@ -129,8 +130,8 @@ pub(crate) async fn list_entrypoint( pub(crate) async fn push_entrypoint(source: Option<&str>, target: Option<&str>) -> Result<()> { let transport = Transport::ContainerStorage; let sysroot = crate::cli::get_storage().await?; - - let repo = &sysroot.repo(); + let ostree = sysroot.get_ostree()?; + let repo = &ostree.repo(); // If the target isn't specified, push to containers-storage + our default image let target = if let Some(target) = target { @@ -150,7 +151,7 @@ pub(crate) async fn push_entrypoint(source: Option<&str>, target: Option<&str>) let source = if let Some(source) = source { ImageReference::try_from(source).context("Parsing source image")? } else { - let status = crate::status::get_status_require_booted(&sysroot)?; + let status = crate::status::get_status_require_booted(&ostree)?; // SAFETY: We know it's booted let booted = status.2.status.booted.unwrap(); let booted_image = booted.image.unwrap().image; @@ -171,7 +172,7 @@ pub(crate) async fn push_entrypoint(source: Option<&str>, target: Option<&str>) /// Thin wrapper for invoking `podman image ` but set up for our internal /// image store (as distinct from /var/lib/containers default). 
pub(crate) async fn imgcmd_entrypoint( - storage: &crate::imgstorage::Storage, + storage: &CStorage, arg: &str, args: &[std::ffi::OsString], ) -> std::result::Result<(), anyhow::Error> { diff --git a/crates/lib/src/install.rs b/crates/lib/src/install.rs index bada00301..28c701bb4 100644 --- a/crates/lib/src/install.rs +++ b/crates/lib/src/install.rs @@ -630,10 +630,7 @@ pub(crate) fn print_configuration() -> Result<()> { } #[context("Creating ostree deployment")] -async fn initialize_ostree_root( - state: &State, - root_setup: &RootSetup, -) -> Result<(Storage, bool, crate::imgstorage::Storage)> { +async fn initialize_ostree_root(state: &State, root_setup: &RootSetup) -> Result<(Storage, bool)> { let sepolicy = state.load_policy()?; let sepolicy = sepolicy.as_ref(); // Load a fd for the mounted target physical root @@ -727,11 +724,11 @@ async fn initialize_ostree_root( )?; } - let imgstore = crate::imgstorage::Storage::create(&sysroot_dir, &temp_run, sepolicy)?; - sysroot.load(cancellable)?; let sysroot = SysrootLock::new_from_sysroot(&sysroot).await?; - Ok((Storage::new(sysroot, &temp_run)?, has_ostree, imgstore)) + let storage = Storage::new(sysroot, &temp_run)?; + + Ok((storage, has_ostree)) } fn check_disk_space( @@ -1336,15 +1333,17 @@ async fn prepare_install( async fn install_with_sysroot( state: &State, rootfs: &RootSetup, - sysroot: &Storage, + storage: &Storage, boot_uuid: &str, bound_images: BoundImages, has_ostree: bool, - imgstore: &crate::imgstorage::Storage, ) -> Result<()> { + let ostree = storage.get_ostree()?; + let c_storage = storage.get_ensure_imgstore()?; + // And actually set up the container in that root, returning a deployment and // the aleph state (see below). 
- let (deployment, aleph) = install_container(state, rootfs, &sysroot, has_ostree).await?; + let (deployment, aleph) = install_container(state, rootfs, ostree, has_ostree).await?; // Write the aleph data that captures the system state at the time of provisioning for aid in future debugging. rootfs .physical_root @@ -1353,7 +1352,7 @@ async fn install_with_sysroot( }) .context("Writing aleph version")?; - let deployment_path = sysroot.deployment_dirpath(&deployment); + let deployment_path = ostree.deployment_dirpath(&deployment); if cfg!(target_arch = "s390x") { // TODO: Integrate s390x support into install_via_bootupd @@ -1376,11 +1375,11 @@ async fn install_with_sysroot( // Now copy each bound image from the host's container storage into the target. for image in resolved_bound_images { let image = image.image.as_str(); - imgstore.pull_from_host_storage(image).await?; + c_storage.pull_from_host_storage(image).await?; } } BoundImages::Unresolved(bound_images) => { - crate::boundimage::pull_images_impl(imgstore, bound_images) + crate::boundimage::pull_images_impl(c_storage, bound_images) .await .context("pulling bound images")?; } @@ -1460,7 +1459,7 @@ async fn install_to_filesystem_impl( // Initialize the ostree sysroot (repo, stateroot, etc.) 
{ - let (sysroot, has_ostree, imgstore) = initialize_ostree_root(state, rootfs).await?; + let (sysroot, has_ostree) = initialize_ostree_root(state, rootfs).await?; install_with_sysroot( state, @@ -1469,12 +1468,12 @@ async fn install_to_filesystem_impl( &boot_uuid, bound_images, has_ostree, - &imgstore, ) .await?; + let ostree = sysroot.get_ostree()?; if matches!(cleanup, Cleanup::TriggerOnNextBoot) { - let sysroot_dir = crate::utils::sysroot_dir(&sysroot)?; + let sysroot_dir = crate::utils::sysroot_dir(ostree)?; tracing::debug!("Writing {DESTRUCTIVE_CLEANUP}"); sysroot_dir.atomic_write(format!("etc/{}", DESTRUCTIVE_CLEANUP), b"")?; } diff --git a/crates/lib/src/install/completion.rs b/crates/lib/src/install/completion.rs index cddff28d9..d834e7024 100644 --- a/crates/lib/src/install/completion.rs +++ b/crates/lib/src/install/completion.rs @@ -14,6 +14,7 @@ use ostree_ext::{gio, ostree}; use rustix::fs::Mode; use rustix::fs::OFlags; +use crate::podstorage::CStorage; use crate::utils::deployment_fd; use super::config; @@ -297,8 +298,7 @@ pub(crate) async fn impl_completion( // When we're run through ostree, we only lazily initialize the podman storage to avoid // having a hard dependency on it. 
- let imgstorage = - &crate::imgstorage::Storage::create(&sysroot_dir, &rundir, sepolicy.as_ref())?; + let imgstorage = &CStorage::create(&sysroot_dir, &rundir, sepolicy.as_ref())?; crate::boundimage::pull_images_impl(imgstorage, bound_images) .await .context("pulling bound images")?; diff --git a/crates/lib/src/lib.rs b/crates/lib/src/lib.rs index 4b40a9ecd..309d027a0 100644 --- a/crates/lib/src/lib.rs +++ b/crates/lib/src/lib.rs @@ -13,13 +13,13 @@ pub(crate) mod fsck; pub(crate) mod generator; mod glyph; mod image; -mod imgstorage; pub(crate) mod journal; mod k8sapitypes; mod lints; mod lsm; pub(crate) mod metadata; mod podman; +mod podstorage; mod progress_jsonl; mod reboot; pub mod spec; diff --git a/crates/lib/src/imgstorage.rs b/crates/lib/src/podstorage.rs similarity index 96% rename from crates/lib/src/imgstorage.rs rename to crates/lib/src/podstorage.rs index db7a4033e..ec68923cb 100644 --- a/crates/lib/src/imgstorage.rs +++ b/crates/lib/src/podstorage.rs @@ -1,9 +1,12 @@ -//! # bootc-managed container storage +//! # bootc-managed instance of containers-storage: //! -//! The default storage for this project uses ostree, canonically storing all of its state in -//! `/sysroot/ostree`. +//! The backend for podman and other tools is known as `containers-storage:`, +//! with a canonical instance that lives in `/var/lib/containers`. //! -//! This containers-storage: which canonically lives in `/sysroot/ostree/bootc`. +//! This is a `containers-storage:` instance which is owned by bootc and +//! is stored at `/sysroot/ostree/bootc`. +//! +//! At the current time, this is only used for Logically Bound Images. use std::collections::HashSet; use std::io::Seek; @@ -43,7 +46,9 @@ pub(crate) const SUBPATH: &str = "storage"; /// The path to the "runroot" with transient runtime state; this is /// relative to the /run directory const RUNROOT: &str = "bootc/storage"; -pub(crate) struct Storage { + +/// A bootc-owned instance of `containers-storage:`. 
+pub(crate) struct CStorage { /// The root directory sysroot: Dir, /// The location of container storage @@ -147,7 +152,7 @@ pub(crate) fn ensure_floating_c_storage_initialized() { } } -impl Storage { +impl CStorage { /// Create a `podman image` Command instance prepared to operate on our alternative /// root. pub(crate) fn new_image_cmd(&self) -> Result { @@ -380,5 +385,5 @@ impl Storage { #[cfg(test)] mod tests { use super::*; - static_assertions::assert_not_impl_any!(Storage: Sync); + static_assertions::assert_not_impl_any!(CStorage: Sync); } diff --git a/crates/lib/src/status.rs b/crates/lib/src/status.rs index 8cb03db1f..d9b33e431 100644 --- a/crates/lib/src/status.rs +++ b/crates/lib/src/status.rs @@ -22,7 +22,7 @@ use crate::cli::OutputFormat; use crate::spec::ImageStatus; use crate::spec::{BootEntry, BootOrder, Host, HostSpec, HostStatus, HostType}; use crate::spec::{ImageReference, ImageSignature}; -use crate::store::{CachedImageStatus, Storage}; +use crate::store::CachedImageStatus; impl From for ImageSignature { fn from(sig: ostree_container::SignatureSource) -> Self { @@ -91,7 +91,7 @@ impl From for OstreeImageReference { } /// Check if a deployment has soft reboot capability -fn has_soft_reboot_capability(sysroot: &Storage, deployment: &ostree::Deployment) -> bool { +fn has_soft_reboot_capability(sysroot: &SysrootLock, deployment: &ostree::Deployment) -> bool { ostree_ext::systemd_has_soft_reboot() && sysroot.deployment_can_soft_reboot(deployment) } @@ -166,7 +166,7 @@ fn imagestatus( /// Given an OSTree deployment, parse out metadata into our spec. #[context("Reading deployment metadata")] fn boot_entry_from_deployment( - sysroot: &Storage, + sysroot: &SysrootLock, deployment: &ostree::Deployment, ) -> Result { let ( @@ -230,7 +230,7 @@ impl BootEntry { /// A variant of [`get_status`] that requires a booted deployment. 
pub(crate) fn get_status_require_booted( - sysroot: &Storage, + sysroot: &SysrootLock, ) -> Result<(ostree::Deployment, Deployments, Host)> { let booted_deployment = sysroot.require_booted_deployment()?; let (deployments, host) = get_status(sysroot, Some(&booted_deployment))?; @@ -241,7 +241,7 @@ pub(crate) fn get_status_require_booted( /// a more native Rust structure. #[context("Computing status")] pub(crate) fn get_status( - sysroot: &Storage, + sysroot: &SysrootLock, booted_deployment: Option<&ostree::Deployment>, ) -> Result<(Deployments, Host)> { let stateroot = booted_deployment.as_ref().map(|d| d.osname()); @@ -347,8 +347,9 @@ pub(crate) async fn status(opts: super::cli::StatusOpts) -> Result<()> { Default::default() } else { let sysroot = super::cli::get_storage().await?; - let booted_deployment = sysroot.booted_deployment(); - let (_deployments, host) = get_status(&sysroot, booted_deployment.as_ref())?; + let ostree = sysroot.get_ostree()?; + let booted_deployment = ostree.booted_deployment(); + let (_deployments, host) = get_status(&ostree, booted_deployment.as_ref())?; host }; diff --git a/crates/lib/src/store/mod.rs b/crates/lib/src/store/mod.rs index e38d4f1df..ada6bb249 100644 --- a/crates/lib/src/store/mod.rs +++ b/crates/lib/src/store/mod.rs @@ -1,5 +1,22 @@ +//! The [`Storage`] holds references to three different types of +//! storage: +//! +//! # OSTree +//! +//! The default backend for the bootable container store; this +//! lives in `/ostree` in the physical root. +//! +//! # containers-storage: +//! +//! Later, bootc gained support for Logically Bound Images. +//! This is a `containers-storage:` instance that lives +//! in `/ostree/bootc/storage`. +//! +//! # composefs +//! +//! This lives in `/composefs` in the physical root. 
+ use std::cell::OnceCell; -use std::ops::Deref; use std::sync::Arc; use anyhow::{Context, Result}; @@ -9,10 +26,12 @@ use cap_std_ext::dirext::CapStdExtDirExt; use fn_error_context::context; use composefs; +use ostree_ext::ostree; use ostree_ext::sysroot::SysrootLock; use rustix::fs::Mode; use crate::lsm; +use crate::podstorage::CStorage; use crate::spec::ImageStatus; use crate::utils::deployment_fd; @@ -31,16 +50,18 @@ pub const COMPOSEFS_MODE: Mode = Mode::from_raw_mode(0o700); /// system root pub(crate) const BOOTC_ROOT: &str = "ostree/bootc"; +/// A reference to a physical filesystem root, plus +/// accessors for the different types of container storage. pub(crate) struct Storage { /// Directory holding the physical root pub physical_root: Dir, /// The OSTree storage - pub sysroot: SysrootLock, + ostree: SysrootLock, /// The composefs storage - pub composefs: OnceCell>, + composefs: OnceCell>, /// The containers-image storage used foR LBIs - imgstore: OnceCell, + imgstore: OnceCell, /// Our runtime state run: Dir, @@ -52,14 +73,6 @@ pub(crate) struct CachedImageStatus { pub cached_update: Option, } -impl Deref for Storage { - type Target = SysrootLock; - - fn deref(&self) -> &Self::Target { - &self.sysroot - } -} - impl Storage { pub fn new(sysroot: SysrootLock, run: &Dir) -> Result { let run = run.try_clone()?; @@ -82,21 +95,32 @@ impl Storage { Ok(Self { physical_root, - sysroot, + ostree: sysroot, run, composefs: Default::default(), imgstore: Default::default(), }) } + /// Access the underlying ostree sysroot + pub(crate) fn get_ostree(&self) -> Result<&SysrootLock> { + Ok(&self.ostree) + } + + /// Clone the underlying ostree sysroot + pub(crate) fn get_ostree_cloned(&self) -> Result { + let r = self.get_ostree()?; + Ok((*r).clone()) + } + /// Access the image storage; will automatically initialize it if necessary. 
- pub(crate) fn get_ensure_imgstore(&self) -> Result<&crate::imgstorage::Storage> { + pub(crate) fn get_ensure_imgstore(&self) -> Result<&CStorage> { if let Some(imgstore) = self.imgstore.get() { return Ok(imgstore); } - let sysroot_dir = crate::utils::sysroot_dir(&self.sysroot)?; + let sysroot_dir = crate::utils::sysroot_dir(&self.ostree)?; - let sepolicy = if self.sysroot.booted_deployment().is_none() { + let sepolicy = if self.ostree.booted_deployment().is_none() { // fallback to policy from container root // this should only happen during cleanup of a broken install tracing::trace!("falling back to container root's selinux policy"); @@ -106,15 +130,14 @@ impl Storage { // load the sepolicy from the booted ostree deployment so the imgstorage can be // properly labeled with /var/lib/container/storage labels tracing::trace!("loading sepolicy from booted ostree deployment"); - let dep = self.sysroot.booted_deployment().unwrap(); - let dep_fs = deployment_fd(&self.sysroot, &dep)?; + let dep = self.ostree.booted_deployment().unwrap(); + let dep_fs = deployment_fd(&self.ostree, &dep)?; lsm::new_sepolicy_at(&dep_fs)? }; tracing::trace!("sepolicy in get_ensure_imgstore: {sepolicy:?}"); - let imgstore = - crate::imgstorage::Storage::create(&sysroot_dir, &self.run, sepolicy.as_ref())?; + let imgstore = CStorage::create(&sysroot_dir, &self.run, sepolicy.as_ref())?; Ok(self.imgstore.get_or_init(|| imgstore)) } @@ -132,7 +155,7 @@ impl Storage { // Bootstrap verity off of the ostree state. In practice this means disabled by // default right now. 
- let ostree_repo = &self.sysroot.repo(); + let ostree_repo = &self.ostree.repo(); let ostree_verity = ostree_ext::fsverity::is_verity_enabled(ostree_repo)?; if !ostree_verity.enabled { tracing::debug!("Setting insecure mode for composefs repo"); @@ -147,7 +170,7 @@ impl Storage { #[context("Updating storage root mtime")] pub(crate) fn update_mtime(&self) -> Result<()> { let sysroot_dir = - crate::utils::sysroot_dir(&self.sysroot).context("Reopen sysroot directory")?; + crate::utils::sysroot_dir(&self.ostree).context("Reopen sysroot directory")?; sysroot_dir .update_timestamps(std::path::Path::new(BOOTC_ROOT))