
Commit 9f19235

Rework API to use a Kubernetes CRD
I was working on the configmap support slowly, but a big issue it raised was how we expose the ability to mutate things. Our current "API" is just an imperative CLI, which would force any higher-level tooling that wants Kubernetes-style declarative system state to build it on top.

Instead, let's use the really nice Rust `kube` crate to define a CRD; this is what is output (in YAML form) via `bootc status` now. We translate the imperative CLI verbs into changes to the `spec` field. However, things become more compelling when we offer a `bootc edit` CLI verb that allows arbitrary changes to the spec.

I think this will become the *only* way to manage attached configmaps, instead of having imperative CLI verbs like `bootc configmap add` etc. At least to start.

Signed-off-by: Colin Walters <[email protected]>
1 parent 41ebc7f commit 9f19235
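
To make the new model concrete, here is a rough sketch (not part of the commit) of how the types introduced below are meant to be used: the CLI keeps the desired state in a `Host` object's `spec`, and `bootc status` renders that object as YAML. The `bootc_lib` crate path, the example image, and the use of `anyhow` and `serde_yaml` in a standalone binary are illustrative assumptions.

    // Illustrative sketch only; assumes the library crate is importable as `bootc_lib`.
    use bootc_lib::spec::{Host, HostSpec, ImageReference, ImageSignature};

    fn main() -> anyhow::Result<()> {
        // The imperative verbs (`upgrade`, `switch`) only mutate the `spec`;
        // the `status` is computed by bootc itself.
        let spec = HostSpec {
            image: Some(ImageReference {
                image: "quay.io/example/os:latest".into(), // placeholder image
                transport: "registry".into(),
                signature: ImageSignature::ContainerPolicy,
            }),
        };
        // `Host::new` is generated by the `kube` CustomResource derive.
        let host = Host::new("host", spec);
        // Prints a YAML document (apiVersion, kind, metadata, spec), similar in
        // shape to what `bootc status` renders.
        println!("{}", serde_yaml::to_string(&host)?);
        Ok(())
    }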

File tree

8 files changed: +362, -223 lines


lib/Cargo.toml

Lines changed: 4 additions & 0 deletions
@@ -19,15 +19,19 @@ hex = "^0.4"
 fn-error-context = "0.2.0"
 gvariant = "0.4.0"
 indicatif = "0.17.0"
+k8s-openapi = { version = "0.18.0", features = ["v1_25"] }
+kube = { version = "0.83.0", features = ["runtime", "derive"] }
 libc = "^0.2"
 liboverdrop = "0.1.0"
 once_cell = "1.9"
 openssl = "^0.10"
 nix = ">= 0.24, < 0.26"
 regex = "1.7.1"
 rustix = { "version" = "0.37", features = ["thread", "process"] }
+schemars = "0.8.6"
 serde = { features = ["derive"], version = "1.0.125" }
 serde_json = "1.0.64"
+serde_yaml = "0.9.17"
 serde_with = ">= 1.9.4, < 2"
 tokio = { features = ["io-std", "time", "process", "rt", "net"], version = ">= 1.13.0" }
 tokio-util = { features = ["io-util"], version = "0.7" }
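
These additions (`k8s-openapi`, `kube`, `schemars`, `serde_yaml`) are what let the CRD be derived from plain Rust types. As a minimal sketch of what they enable (nothing in this commit wires this up), the derived `Host` type can emit its own CustomResourceDefinition; the `bootc_lib::spec` path is an assumption.

    // Sketch only: render the CRD schema for the Host type defined later in this diff.
    use bootc_lib::spec::Host;
    use kube::CustomResourceExt;

    fn main() -> anyhow::Result<()> {
        // `crd()` is provided by the kube "derive" feature enabled above.
        let crd = Host::crd();
        println!("{}", serde_yaml::to_string(&crd)?);
        Ok(())
    }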

lib/src/cli.rs

Lines changed: 81 additions & 53 deletions
@@ -19,6 +19,9 @@ use std::ffi::OsString;
 use std::os::unix::process::CommandExt;
 use std::process::Command;
 
+use crate::spec::HostSpec;
+use crate::spec::ImageReference;
+
 /// Perform an upgrade operation
 #[derive(Debug, Parser)]
 pub(crate) struct UpgradeOpts {
@@ -174,9 +177,10 @@ pub(crate) async fn get_locked_sysroot() -> Result<ostree_ext::sysroot::SysrootL
 #[context("Pulling")]
 async fn pull(
     repo: &ostree::Repo,
-    imgref: &OstreeImageReference,
+    imgref: &ImageReference,
     quiet: bool,
 ) -> Result<Box<LayeredImageState>> {
+    let imgref = &OstreeImageReference::from(imgref.clone());
     let config = Default::default();
     let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
     let prep = match imp.prepare().await? {
@@ -215,22 +219,35 @@ async fn pull(
 async fn stage(
     sysroot: &SysrootLock,
     stateroot: &str,
-    imgref: &ostree_container::OstreeImageReference,
     image: Box<LayeredImageState>,
-    origin: &glib::KeyFile,
+    spec: &HostSpec,
 ) -> Result<()> {
     let cancellable = gio::Cancellable::NONE;
     let stateroot = Some(stateroot);
     let merge_deployment = sysroot.merge_deployment(stateroot);
+    let origin = glib::KeyFile::new();
+    let ostree_imgref = spec
+        .image
+        .as_ref()
+        .map(|imgref| OstreeImageReference::from(imgref.clone()));
+    if let Some(imgref) = ostree_imgref.as_ref() {
+        origin.set_string(
+            "origin",
+            ostree_container::deploy::ORIGIN_CONTAINER,
+            imgref.to_string().as_str(),
+        );
+    }
     let _new_deployment = sysroot.stage_tree_with_options(
         stateroot,
         image.merge_commit.as_str(),
-        Some(origin),
+        Some(&origin),
        merge_deployment.as_ref(),
         &Default::default(),
         cancellable,
     )?;
-    println!("Queued for next boot: {imgref}");
+    if let Some(imgref) = ostree_imgref.as_ref() {
+        println!("Queued for next boot: {imgref}");
+    }
     Ok(())
 }
 
@@ -266,30 +283,30 @@ async fn prepare_for_write() -> Result<()> {
 async fn upgrade(opts: UpgradeOpts) -> Result<()> {
     prepare_for_write().await?;
     let sysroot = &get_locked_sysroot().await?;
-    let repo = &sysroot.repo();
     let booted_deployment = &sysroot.require_booted_deployment()?;
-    let status = crate::status::DeploymentStatus::from_deployment(booted_deployment, true)?;
-    let osname = booted_deployment.osname();
-    let origin = booted_deployment
-        .origin()
-        .ok_or_else(|| anyhow::anyhow!("Deployment is missing an origin"))?;
-    let imgref = status
-        .image
-        .ok_or_else(|| anyhow::anyhow!("Booted deployment is not container image based"))?;
-    let imgref: OstreeImageReference = imgref.into();
-    if !status.supported {
+    let (_deployments, host) = crate::status::get_status(sysroot, Some(booted_deployment))?;
+    // SAFETY: There must be a status if we have a booted deployment
+    let status = host.status.unwrap();
+    let imgref = host.spec.image.as_ref();
+    // If there's no specified image, let's be nice and check if the booted system is using rpm-ostree
+    if imgref.is_none() && status.booted.map_or(false, |b| b.incompatible) {
         return Err(anyhow::anyhow!(
             "Booted deployment contains local rpm-ostree modifications; cannot upgrade via bootc"
         ));
     }
-    let commit = booted_deployment.csum();
-    let state = ostree_container::store::query_image_commit(repo, &commit)?;
-    let digest = state.manifest_digest.as_str();
-
+    let imgref = imgref.ok_or_else(|| anyhow::anyhow!("No image source specified"))?;
+    // Find the currently queued digest, if any before we pull
+    let queued_digest = status
+        .staged
+        .as_ref()
+        .and_then(|e| e.image.as_ref())
+        .map(|img| img.image_digest.as_str());
     if opts.check {
         // pull the image manifest without the layers
         let config = Default::default();
-        let mut imp = ostree_container::store::ImageImporter::new(repo, &imgref, config).await?;
+        let imgref = &OstreeImageReference::from(imgref.clone());
+        let mut imp =
+            ostree_container::store::ImageImporter::new(&sysroot.repo(), imgref, config).await?;
         match imp.prepare().await? {
             PrepareResult::AlreadyPresent(c) => {
                 println!(
@@ -298,24 +315,27 @@ async fn upgrade(opts: UpgradeOpts) -> Result<()> {
                 );
                 return Ok(());
             }
-            PrepareResult::Ready(p) => {
+            PrepareResult::Ready(r) => {
+                // TODO show a diff
                 println!(
-                    "New manifest available for {}. Digest {}",
-                    imgref, p.manifest_digest
+                    "New image available for {imgref}. Digest {}",
+                    r.manifest_digest
                 );
+                // Note here we'll fall through to handling the --touch-if-changed below
             }
         }
     } else {
-        let fetched = pull(repo, &imgref, opts.quiet).await?;
-
-        if fetched.merge_commit.as_str() == commit.as_str() {
-            println!("Already queued: {digest}");
-            return Ok(());
+        let fetched = pull(&sysroot.repo(), imgref, opts.quiet).await?;
+        if let Some(queued_digest) = queued_digest {
+            if fetched.merge_commit.as_str() == queued_digest {
+                println!("Already queued: {queued_digest}");
+                return Ok(());
+            }
         }
 
-        stage(sysroot, &osname, &imgref, fetched, &origin).await?;
+        let osname = booted_deployment.osname();
+        stage(sysroot, &osname, fetched, &host.spec).await?;
     }
-
     if let Some(path) = opts.touch_if_changed {
         std::fs::write(&path, "").with_context(|| format!("Writing {path}"))?;
     }
@@ -327,14 +347,14 @@ async fn upgrade(opts: UpgradeOpts) -> Result<()> {
 #[context("Switching")]
 async fn switch(opts: SwitchOpts) -> Result<()> {
     prepare_for_write().await?;
-
     let cancellable = gio::Cancellable::NONE;
-    let sysroot = get_locked_sysroot().await?;
-    let booted_deployment = &sysroot.require_booted_deployment()?;
-    let (origin, booted_image) = crate::utils::get_image_origin(booted_deployment)?;
-    let booted_refspec = origin.optional_string("origin", "refspec")?;
-    let osname = booted_deployment.osname();
+
+    let sysroot = &get_locked_sysroot().await?;
     let repo = &sysroot.repo();
+    let booted_deployment = &sysroot.require_booted_deployment()?;
+    let (_deployments, host) = crate::status::get_status(sysroot, Some(booted_deployment))?;
+    // SAFETY: There must be a status if we have a booted deployment
+    let status = host.status.unwrap();
 
     let transport = ostree_container::Transport::try_from(opts.transport.as_str())?;
     let imgref = ostree_container::ImageReference {
@@ -349,30 +369,38 @@ async fn switch(opts: SwitchOpts) -> Result<()> {
         SignatureSource::ContainerPolicy
     };
     let target = ostree_container::OstreeImageReference { sigverify, imgref };
+    let target = ImageReference::from(target);
+
+    let new_spec = {
+        let mut new_spec = host.spec.clone();
+        new_spec.image = Some(target.clone());
+        new_spec
+    };
+
+    if new_spec == host.spec {
+        anyhow::bail!("No changes in current host spec");
+    }
 
     let fetched = pull(repo, &target, opts.quiet).await?;
 
     if !opts.retain {
         // By default, we prune the previous ostree ref or container image
-        if let Some(ostree_ref) = booted_refspec {
-            let (remote, ostree_ref) =
-                ostree::parse_refspec(&ostree_ref).context("Failed to parse ostree ref")?;
-            repo.set_ref_immediate(remote.as_deref(), &ostree_ref, None, cancellable)?;
-            origin.remove_key("origin", "refspec")?;
-        } else if let Some(booted_image) = booted_image.as_ref() {
-            ostree_container::store::remove_image(repo, &booted_image.imgref)?;
-            let _nlayers: u32 = ostree_container::store::gc_image_layers(repo)?;
+        if let Some(booted_origin) = booted_deployment.origin() {
+            if let Some(ostree_ref) = booted_origin.optional_string("origin", "refspec")? {
+                let (remote, ostree_ref) =
+                    ostree::parse_refspec(&ostree_ref).context("Failed to parse ostree ref")?;
+                repo.set_ref_immediate(remote.as_deref(), &ostree_ref, None, cancellable)?;
+            } else if let Some(booted_image) = status.booted.as_ref().and_then(|b| b.image.as_ref())
+            {
+                let imgref = OstreeImageReference::from(booted_image.image.clone());
+                ostree_container::store::remove_image(repo, &imgref.imgref)?;
+                let _nlayers: u32 = ostree_container::store::gc_image_layers(repo)?;
+            }
         }
     }
 
-    // We always make a fresh origin to toss out old state.
-    let origin = glib::KeyFile::new();
-    origin.set_string(
-        "origin",
-        ostree_container::deploy::ORIGIN_CONTAINER,
-        target.to_string().as_str(),
-    );
-    stage(&sysroot, &osname, &target, fetched, &origin).await?;
+    let stateroot = booted_deployment.osname();
+    stage(sysroot, &stateroot, fetched, &new_spec).await?;
 
     Ok(())
 }
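
The commit message floats a `bootc edit` verb built on these same types. A hypothetical sketch of that flow, mirroring the "No changes in current host spec" check in `switch()` above (the helper name and error handling are illustrative, not part of this commit):

    // Hypothetical helper: parse user-edited YAML into a Host and only return a
    // new spec if it actually differs from the current one.
    use anyhow::Result;
    use bootc_lib::spec::{Host, HostSpec};

    fn parse_edited_spec(current: &HostSpec, edited_yaml: &str) -> Result<Option<HostSpec>> {
        let edited: Host = serde_yaml::from_str(edited_yaml)?;
        if &edited.spec == current {
            // Nothing changed; the caller can skip pulling and staging.
            return Ok(None);
        }
        Ok(Some(edited.spec))
    }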

lib/src/lib.rs

Lines changed: 1 addition & 0 deletions
@@ -36,6 +36,7 @@ mod install;
 pub(crate) mod mount;
 #[cfg(feature = "install")]
 mod podman;
+pub mod spec;
 #[cfg(feature = "install")]
 mod task;
 
lib/src/privtests.rs

Lines changed: 4 additions & 3 deletions
@@ -7,6 +7,7 @@ use rustix::fd::AsFd;
 use xshell::{cmd, Shell};
 
 use super::cli::TestingOpts;
+use super::spec::Host;
 
 const IMGSIZE: u64 = 20 * 1024 * 1024 * 1024;
 
@@ -101,9 +102,9 @@ pub(crate) fn impl_run_host() -> Result<()> {
 pub(crate) fn impl_run_container() -> Result<()> {
     assert!(ostree_ext::container_utils::is_ostree_container()?);
     let sh = Shell::new()?;
-    let stout = cmd!(sh, "bootc status").read()?;
-    assert!(stout.contains("Running in a container (ostree base)."));
-    drop(stout);
+    let host: Host = serde_yaml::from_str(&cmd!(sh, "bootc status").read()?)?;
+    let status = host.status.unwrap();
+    assert!(status.is_container);
     for c in ["upgrade", "update"] {
         let o = Command::new("bootc").arg(c).output()?;
         let st = o.status;

lib/src/spec.rs

Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
+//! The definition for host system state.
+
+use kube::CustomResource;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+/// Representation of a bootc host system
+#[derive(
+    CustomResource, Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, JsonSchema,
+)]
+#[kube(
+    group = "org.containers.bootc",
+    version = "v1alpha1",
+    kind = "BootcHost",
+    struct = "Host",
+    namespaced,
+    status = "HostStatus",
+    derive = "PartialEq",
+    derive = "Default"
+)]
+#[serde(rename_all = "camelCase")]
+pub struct HostSpec {
+    /// The host image
+    pub image: Option<ImageReference>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, JsonSchema)]
+/// An image signature
+#[serde(rename_all = "camelCase")]
+pub enum ImageSignature {
+    /// Fetches will use the named ostree remote for signature verification of the ostree commit.
+    OstreeRemote(String),
+    /// Fetches will defer to the `containers-policy.json`, but we make a best effort to reject `default: insecureAcceptAnything` policy.
+    ContainerPolicy,
+    /// No signature verification will be performed
+    Insecure,
+}
+
+/// A container image reference with attached transport and signature verification
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct ImageReference {
+    /// The container image reference
+    pub image: String,
+    /// The container image transport
+    pub transport: String,
+    /// Disable signature verification
+    pub signature: ImageSignature,
+}
+
+/// The status of the booted image
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct ImageStatus {
+    /// The currently booted image
+    pub image: ImageReference,
+    /// The digest of the fetched image (e.g. sha256:a0...);
+    pub image_digest: String,
+}
+
+/// A bootable entry
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct BootEntryOstree {
+    /// The ostree commit checksum
+    pub checksum: String,
+    /// The deployment serial
+    pub deploy_serial: u32,
+}
+
+/// A bootable entry
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct BootEntry {
+    /// The image reference
+    pub image: Option<ImageStatus>,
+    /// Whether this boot entry is not compatible (has origin changes bootc does not understand)
+    pub incompatible: bool,
+    /// Whether this entry will be subject to garbage collection
+    pub pinned: bool,
+    /// If this boot entry is ostree based, the corresponding state
+    pub ostree: Option<BootEntryOstree>,
+}
+
+/// The status of the host system
+#[derive(Debug, Clone, Serialize, Default, Deserialize, PartialEq, Eq, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct HostStatus {
+    /// The staged image for the next boot
+    pub staged: Option<BootEntry>,
+    /// The booted image; this will be unset if the host is not bootc compatible.
+    pub booted: Option<BootEntry>,
+    /// The previously booted image
+    pub rollback: Option<BootEntry>,
+
+    /// Whether or not the current system state is an ostree-based container
+    pub is_container: bool,
+}
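
These types are designed to round-trip through `serde_yaml`, which is how `bootc status` output and the container check in `privtests.rs` stay in sync. A small sketch with illustrative values (the `bootc_lib` path and the placeholder strings are assumptions):

    use bootc_lib::spec::{
        BootEntry, BootEntryOstree, Host, HostSpec, HostStatus, ImageReference, ImageSignature,
        ImageStatus,
    };

    fn main() -> anyhow::Result<()> {
        let image = ImageReference {
            image: "quay.io/example/os:latest".into(), // placeholder
            transport: "registry".into(),
            signature: ImageSignature::ContainerPolicy,
        };
        let mut host = Host::new("host", HostSpec { image: Some(image.clone()) });
        host.status = Some(HostStatus {
            staged: None,
            booted: Some(BootEntry {
                image: Some(ImageStatus {
                    image,
                    image_digest: "sha256:...".into(), // placeholder digest
                }),
                incompatible: false,
                pinned: false,
                ostree: Some(BootEntryOstree {
                    checksum: "...".into(), // placeholder ostree commit
                    deploy_serial: 0,
                }),
            }),
            rollback: None,
            is_container: false,
        });
        let yaml = serde_yaml::to_string(&host)?;
        // Parse it back, as the container test does with real `bootc status` output.
        let parsed: Host = serde_yaml::from_str(&yaml)?;
        assert_eq!(parsed.spec, host.spec);
        Ok(())
    }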
