
Commit c23c9ae

Fix (or allow) remaining clippy lints
Assisted-by: Claude Code
Signed-off-by: Colin Walters <[email protected]>
1 parent 9bc7fe9 commit c23c9ae

4 files changed: +97 -92 lines changed
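Most of the churn in deploy.rs and tmpfiles.rs follows one pattern: functions that took eight or nine positional parameters now take a single config struct (LayerProgressConfig, TmpfilesConvertConfig), the usual way to satisfy clippy's too-many-arguments lint. A minimal sketch of the pattern, with hypothetical names rather than the code from this commit:

    // Hypothetical example: bundle a long positional parameter list into one struct.
    struct FetchConfig {
        url: String,
        retries: u32,
        quiet: bool,
    }

    // Before: fetch(url, retries, quiet, ...) with a long positional list.
    // After: a single config parameter with named fields.
    fn fetch(config: &FetchConfig) -> Result<(), std::io::Error> {
        if !config.quiet {
            println!("fetching {} (retries: {})", config.url, config.retries);
        }
        Ok(())
    }

    fn main() -> Result<(), std::io::Error> {
        fetch(&FetchConfig {
            url: "https://example.invalid/image".into(),
            retries: 3,
            quiet: false,
        })
    }

Besides quieting the lint, call sites gain named fields, so mixing up the order of several same-typed counters becomes a compile error rather than a silent bug.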

crates/lib/src/deploy.rs
Lines changed: 57 additions & 52 deletions

@@ -138,26 +138,29 @@ fn prefix_of_progress(p: &ImportProgress) -> &'static str {
     }
 }
 
-/// Write container fetch progress to standard output.
-async fn handle_layer_progress_print(
-    mut layers: tokio::sync::mpsc::Receiver<ostree_container::store::ImportProgress>,
-    mut layer_bytes: tokio::sync::watch::Receiver<Option<ostree_container::store::LayerProgress>>,
+/// Configuration for layer progress printing
+struct LayerProgressConfig {
+    layers: tokio::sync::mpsc::Receiver<ostree_container::store::ImportProgress>,
+    layer_bytes: tokio::sync::watch::Receiver<Option<ostree_container::store::LayerProgress>>,
     digest: Box<str>,
     n_layers_to_fetch: usize,
     layers_total: usize,
     bytes_to_download: u64,
     bytes_total: u64,
     prog: ProgressWriter,
     quiet: bool,
-) -> ProgressWriter {
+}
+
+/// Write container fetch progress to standard output.
+async fn handle_layer_progress_print(mut config: LayerProgressConfig) -> ProgressWriter {
     let start = std::time::Instant::now();
     let mut total_read = 0u64;
     let bar = indicatif::MultiProgress::new();
-    if quiet {
+    if config.quiet {
         bar.set_draw_target(indicatif::ProgressDrawTarget::hidden());
     }
     let layers_bar = bar.add(indicatif::ProgressBar::new(
-        n_layers_to_fetch.try_into().unwrap(),
+        config.n_layers_to_fetch.try_into().unwrap(),
     ));
     let byte_bar = bar.add(indicatif::ProgressBar::new(0));
     // let byte_bar = indicatif::ProgressBar::new(0);
@@ -185,7 +188,7 @@ async fn handle_layer_progress_print(
         tokio::select! {
             // Always handle layer changes first.
             biased;
-            layer = layers.recv() => {
+            layer = config.layers.recv() => {
                 if let Some(l) = layer {
                     let layer = descriptor_of_progress(&l);
                     let layer_type = prefix_of_progress(&l);
@@ -213,16 +216,16 @@ async fn handle_layer_progress_print(
                         // Emit an event where bytes == total to signal completion.
                        subtask.bytes = layer_size;
                        subtasks.push(subtask.clone());
-                        prog.send(Event::ProgressBytes {
+                        config.prog.send(Event::ProgressBytes {
                            task: "pulling".into(),
-                            description: format!("Pulling Image: {digest}").into(),
-                            id: (*digest).into(),
-                            bytes_cached: bytes_total - bytes_to_download,
+                            description: format!("Pulling Image: {}", config.digest).into(),
+                            id: (*config.digest).into(),
+                            bytes_cached: config.bytes_total - config.bytes_to_download,
                            bytes: total_read,
-                            bytes_total: bytes_to_download,
-                            steps_cached: (layers_total - n_layers_to_fetch) as u64,
+                            bytes_total: config.bytes_to_download,
+                            steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                            steps: layers_bar.position(),
-                            steps_total: n_layers_to_fetch as u64,
+                            steps_total: config.n_layers_to_fetch as u64,
                            subtasks: subtasks.clone(),
                        }).await;
                    }
@@ -231,28 +234,28 @@ async fn handle_layer_progress_print(
                     break
                 };
             },
-            r = layer_bytes.changed() => {
+            r = config.layer_bytes.changed() => {
                 if r.is_err() {
                     // If the receiver is disconnected, then we're done
                     break
                 }
                 let bytes = {
-                    let bytes = layer_bytes.borrow_and_update();
+                    let bytes = config.layer_bytes.borrow_and_update();
                     bytes.as_ref().cloned()
                 };
                 if let Some(bytes) = bytes {
                     byte_bar.set_position(bytes.fetched);
                     subtask.bytes = byte_bar.position();
-                    prog.send_lossy(Event::ProgressBytes {
+                    config.prog.send_lossy(Event::ProgressBytes {
                         task: "pulling".into(),
-                        description: format!("Pulling Image: {digest}").into(),
-                        id: (*digest).into(),
-                        bytes_cached: bytes_total - bytes_to_download,
+                        description: format!("Pulling Image: {}", config.digest).into(),
+                        id: (*config.digest).into(),
+                        bytes_cached: config.bytes_total - config.bytes_to_download,
                         bytes: total_read + byte_bar.position(),
-                        bytes_total: bytes_to_download,
-                        steps_cached: (layers_total - n_layers_to_fetch) as u64,
+                        bytes_total: config.bytes_to_download,
+                        steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                         steps: layers_bar.position(),
-                        steps_total: n_layers_to_fetch as u64,
+                        steps_total: config.n_layers_to_fetch as u64,
                         subtasks: subtasks.clone().into_iter().chain([subtask.clone()]).collect(),
                     }).await;
                 }
@@ -280,25 +283,27 @@ async fn handle_layer_progress_print(
     // Since the progress notifier closed, we know import has started
     // use as a heuristic to begin import progress
     // Cannot be lossy or it is dropped
-    prog.send(Event::ProgressSteps {
-        task: "importing".into(),
-        description: "Importing Image".into(),
-        id: (*digest).into(),
-        steps_cached: 0,
-        steps: 0,
-        steps_total: 1,
-        subtasks: [SubTaskStep {
-            subtask: "importing".into(),
+    config
+        .prog
+        .send(Event::ProgressSteps {
+            task: "importing".into(),
             description: "Importing Image".into(),
-            id: "importing".into(),
-            completed: false,
-        }]
-        .into(),
-    })
-    .await;
+            id: (*config.digest).into(),
+            steps_cached: 0,
+            steps: 0,
+            steps_total: 1,
+            subtasks: [SubTaskStep {
+                subtask: "importing".into(),
+                description: "Importing Image".into(),
+                id: "importing".into(),
+                completed: false,
+            }]
+            .into(),
+        })
+        .await;
 
     // Return the writer
-    prog
+    config.prog
 }
 
 /// Gather all bound images in all deployments, then prune the image store,
@@ -332,7 +337,7 @@ pub(crate) struct PreparedImportMeta {
 }
 
 pub(crate) enum PreparedPullResult {
-    Ready(PreparedImportMeta),
+    Ready(Box<PreparedImportMeta>),
     AlreadyPresent(Box<ImageState>),
 }
 
@@ -372,7 +377,7 @@ pub(crate) async fn prepare_for_pull(
         prep,
     };
 
-    Ok(PreparedPullResult::Ready(prepared_image))
+    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
 }
 
 #[context("Pulling")]
@@ -388,17 +393,17 @@ pub(crate) async fn pull_from_prepared(
     let digest_imp = prepared_image.digest.clone();
 
     let printer = tokio::task::spawn(async move {
-        handle_layer_progress_print(
-            layer_progress,
-            layer_byte_progress,
-            digest.as_ref().into(),
-            prepared_image.n_layers_to_fetch,
-            prepared_image.layers_total,
-            prepared_image.bytes_to_fetch,
-            prepared_image.bytes_total,
+        handle_layer_progress_print(LayerProgressConfig {
+            layers: layer_progress,
+            layer_bytes: layer_byte_progress,
+            digest: digest.as_ref().into(),
+            n_layers_to_fetch: prepared_image.n_layers_to_fetch,
+            layers_total: prepared_image.layers_total,
+            bytes_to_download: prepared_image.bytes_to_fetch,
+            bytes_total: prepared_image.bytes_total,
             prog,
             quiet,
-        )
+        })
        .await
    });
    let import = prepared_image.imp.import(prepared_image.prep).await;
@@ -444,7 +449,7 @@ pub(crate) async fn pull(
     match prepare_for_pull(repo, imgref, target_imgref).await? {
         PreparedPullResult::AlreadyPresent(existing) => Ok(existing),
         PreparedPullResult::Ready(prepared_image_meta) => {
-            Ok(pull_from_prepared(imgref, quiet, prog, prepared_image_meta).await?)
+            Ok(pull_from_prepared(imgref, quiet, prog, *prepared_image_meta).await?)
         }
     }
 }
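The other deploy.rs change boxes the payload of PreparedPullResult::Ready and dereferences it at the call sites; boxing the big variant is the standard fix when clippy warns that one variant is much larger than the others (large_enum_variant), since an enum is otherwise as large as its biggest variant. A standalone sketch with hypothetical types, not this crate's:

    // Hypothetical types, for illustration only.
    struct LargeMeta {
        buf: [u8; 4096], // a large inline payload makes the whole enum 4 KiB+
    }

    enum PullResult {
        // Boxing keeps the enum itself pointer-sized and silences the lint.
        Ready(Box<LargeMeta>),
        AlreadyPresent,
    }

    fn main() {
        let r = PullResult::Ready(Box::new(LargeMeta { buf: [0; 4096] }));
        // Callers that need the owned value deref the box, mirroring the
        // `*prepared_image_meta` / `*image_meta` changes at the call sites.
        if let PullResult::Ready(meta) = r {
            let meta: LargeMeta = *meta;
            assert_eq!(meta.buf.len(), 4096);
        }
        assert!(std::mem::size_of::<PullResult>() <= 16);
    }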

crates/lib/src/install.rs
Lines changed: 1 addition & 1 deletion

@@ -760,7 +760,7 @@ async fn install_container(
         PreparedPullResult::AlreadyPresent(existing) => existing,
         PreparedPullResult::Ready(image_meta) => {
             check_disk_space(root_setup.physical_root.as_fd(), &image_meta, &spec_imgref)?;
-            pull_from_prepared(&spec_imgref, false, ProgressWriter::default(), image_meta).await?
+            pull_from_prepared(&spec_imgref, false, ProgressWriter::default(), *image_meta).await?
         }
     };
 
crates/lib/src/spec.rs
Lines changed: 2 additions & 4 deletions

@@ -98,10 +98,8 @@ fn canonicalize_reference(reference: Reference) -> Option<Reference> {
     reference.tag()?;
 
     // No digest? Also pass through.
-    let Some(digest) = reference.digest() else {
-        return None;
-    };
-
+    let digest = reference.digest()?;
+    // Otherwise, replace with the digest
     Some(reference.clone_with_digest(digest.to_owned()))
 }
 
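The spec.rs change collapses a `let ... else { return None }` into the `?` operator on Option, the rewrite clippy's question-mark lint suggests. A tiny standalone illustration (hypothetical helper, not the crate's Reference API):

    // Hypothetical helper: return the digest suffix of "name@sha256:..." if present.
    fn digest_of(reference: &str) -> Option<String> {
        // Instead of: let Some((_, digest)) = reference.split_once('@') else { return None; };
        let (_, digest) = reference.split_once('@')?;
        Some(digest.to_owned())
    }

    fn main() {
        assert_eq!(
            digest_of("quay.io/example/os@sha256:abc").as_deref(),
            Some("sha256:abc")
        );
        assert_eq!(digest_of("quay.io/example/os:latest"), None);
    }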

crates/tmpfiles/src/lib.rs
Lines changed: 37 additions & 35 deletions

@@ -263,14 +263,16 @@ pub fn var_to_tmpfiles<U: uzers::Users, G: uzers::Groups>(
     let mut prefix = PathBuf::from("/var");
     let mut unsupported = Vec::new();
     convert_path_to_tmpfiles_d_recurse(
+        &TmpfilesConvertConfig {
+            users,
+            groups,
+            rootfs,
+            existing: &existing_tmpfiles,
+            readonly: false,
+        },
         &mut entries,
         &mut unsupported,
-        users,
-        groups,
-        rootfs,
-        &existing_tmpfiles,
         &mut prefix,
-        false,
     )?;
 
     // If there's no entries, don't write a file
@@ -309,52 +311,59 @@ pub fn var_to_tmpfiles<U: uzers::Users, G: uzers::Groups>(
     })
 }
 
+/// Configuration for recursive tmpfiles conversion
+struct TmpfilesConvertConfig<'a, U: uzers::Users, G: uzers::Groups> {
+    users: &'a U,
+    groups: &'a G,
+    rootfs: &'a Dir,
+    existing: &'a BTreeMap<PathBuf, String>,
+    readonly: bool,
+}
+
 /// Recursively explore target directory and translate content to tmpfiles.d entries. See
 /// `convert_var_to_tmpfiles_d` for more background.
 ///
 /// This proceeds depth-first and progressively deletes translated subpaths as it goes.
 /// `prefix` is updated at each recursive step, so that in case of errors it can be
 /// used to pinpoint the faulty path.
 fn convert_path_to_tmpfiles_d_recurse<U: uzers::Users, G: uzers::Groups>(
+    config: &TmpfilesConvertConfig<'_, U, G>,
     out_entries: &mut BTreeSet<String>,
     out_unsupported: &mut Vec<PathBuf>,
-    users: &U,
-    groups: &G,
-    rootfs: &Dir,
-    existing: &BTreeMap<PathBuf, String>,
     prefix: &mut PathBuf,
-    readonly: bool,
 ) -> Result<()> {
     let relpath = prefix.strip_prefix("/").unwrap();
-    for subpath in rootfs.read_dir(relpath)? {
+    for subpath in config.rootfs.read_dir(relpath)? {
         let subpath = subpath?;
         let meta = subpath.metadata()?;
         let fname = subpath.file_name();
         prefix.push(fname);
 
-        let has_tmpfiles_entry = existing.contains_key(prefix);
+        let has_tmpfiles_entry = config.existing.contains_key(prefix);
 
         // Translate this file entry.
         if !has_tmpfiles_entry {
             let entry = {
                 // SAFETY: We know this path is absolute
                 let relpath = prefix.strip_prefix("/").unwrap();
-                let Some(tmpfiles_meta) = FileMeta::from_fs(rootfs, &relpath)? else {
+                let Some(tmpfiles_meta) = FileMeta::from_fs(config.rootfs, &relpath)? else {
                     out_unsupported.push(relpath.into());
                     assert!(prefix.pop());
                     continue;
                 };
                 let uid = meta.uid();
                 let gid = meta.gid();
-                let user = users
+                let user = config
+                    .users
                     .get_user_by_uid(meta.uid())
                     .ok_or(Error::UserNotFound(uid))?;
                 let username = user.name();
                 let username: &str = username.to_str().ok_or_else(|| Error::NonUtf8User {
                     uid,
                     name: username.to_string_lossy().into_owned(),
                 })?;
-                let group = groups
+                let group = config
+                    .groups
                     .get_group_by_gid(gid)
                     .ok_or(Error::GroupNotFound(gid))?;
                 let groupname = group.name();
@@ -371,27 +380,18 @@ fn convert_path_to_tmpfiles_d_recurse<U: uzers::Users, G: uzers::Groups>(
             // SAFETY: We know this path is absolute
             let relpath = prefix.strip_prefix("/").unwrap();
             // Avoid traversing mount points by default
-            if rootfs.open_dir_noxdev(relpath)?.is_some() {
-                convert_path_to_tmpfiles_d_recurse(
-                    out_entries,
-                    out_unsupported,
-                    users,
-                    groups,
-                    rootfs,
-                    existing,
-                    prefix,
-                    readonly,
-                )?;
+            if config.rootfs.open_dir_noxdev(relpath)?.is_some() {
+                convert_path_to_tmpfiles_d_recurse(config, out_entries, out_unsupported, prefix)?;
                 let relpath = prefix.strip_prefix("/").unwrap();
-                if !readonly {
-                    rootfs.remove_dir_all(relpath)?;
+                if !config.readonly {
+                    config.rootfs.remove_dir_all(relpath)?;
                 }
             }
         } else {
             // SAFETY: We know this path is absolute
             let relpath = prefix.strip_prefix("/").unwrap();
-            if !readonly {
-                rootfs.remove_file(relpath)?;
+            if !config.readonly {
+                config.rootfs.remove_file(relpath)?;
             }
         }
         assert!(prefix.pop());
@@ -435,14 +435,16 @@ pub fn find_missing_tmpfiles_current_root() -> Result<TmpfilesResult> {
     let mut tmpfiles = BTreeSet::new();
     let mut unsupported = Vec::new();
     convert_path_to_tmpfiles_d_recurse(
+        &TmpfilesConvertConfig {
+            users: &usergroups,
+            groups: &usergroups,
+            rootfs: &rootfs,
+            existing: &existing_tmpfiles,
+            readonly: true,
+        },
         &mut tmpfiles,
         &mut unsupported,
-        &usergroups,
-        &usergroups,
-        &rootfs,
-        &existing_tmpfiles,
         &mut prefix,
-        true,
     )?;
     Ok(TmpfilesResult {
         tmpfiles,
