
Commit 8b746e9

storage: a series of fixups for stargz chunks

For rafs v6 in the fscache daemon, we must be compatible with stargz chunks.

Signed-off-by: Yan Song <[email protected]>

1 parent be414e5 · commit 8b746e9

9 files changed: 93 additions, 22 deletions


rafs/src/fs.rs (5 additions, 6 deletions)

```diff
@@ -40,8 +40,7 @@ use nydus_utils::metrics::{self, FopRecorder, StatsFop::*};
 
 use crate::metadata::layout::RAFS_ROOT_INODE;
 use crate::metadata::{
-    Inode, PostWalkAction, RafsInode, RafsSuper, RafsSuperMeta, DOT, DOTDOT,
-    RAFS_DEFAULT_CHUNK_SIZE,
+    Inode, PostWalkAction, RafsInode, RafsSuper, RafsSuperMeta, DOT, DOTDOT, RAFS_MAX_CHUNK_SIZE,
 };
 use crate::{RafsError, RafsIoReader, RafsResult};
 
@@ -104,9 +103,9 @@ impl TryFrom<&RafsConfig> for BlobPrefetchConfig {
     type Error = RafsError;
 
     fn try_from(c: &RafsConfig) -> RafsResult<Self> {
-        if c.fs_prefetch.merging_size as u64 > RAFS_DEFAULT_CHUNK_SIZE {
+        if c.fs_prefetch.merging_size as u64 > RAFS_MAX_CHUNK_SIZE {
             return Err(RafsError::Configure(
-                "Merging size can't exceed chunk size".to_string(),
+                "merging size can't exceed max chunk size".to_string(),
             ));
         } else if c.fs_prefetch.enable && c.fs_prefetch.threads_count == 0 {
             return Err(RafsError::Configure(
@@ -923,8 +922,8 @@ impl FileSystem for Rafs {
 #[cfg(test)]
 pub(crate) mod tests {
     use super::*;
+    use crate::metadata::RAFS_DEFAULT_CHUNK_SIZE;
     use crate::RafsIoRead;
-    use storage::RAFS_MAX_CHUNK_SIZE;
 
     pub fn new_rafs_backend() -> Box<Rafs> {
         let config = r#"
@@ -1074,7 +1073,7 @@ pub(crate) mod tests {
         config.fs_prefetch.merging_size = RAFS_MAX_CHUNK_SIZE as usize + 1;
         assert!(BlobPrefetchConfig::try_from(&config).is_err());
 
-        config.fs_prefetch.merging_size = RAFS_MAX_CHUNK_SIZE as usize;
+        config.fs_prefetch.merging_size = RAFS_DEFAULT_CHUNK_SIZE as usize;
         config.fs_prefetch.bandwidth_rate = 1;
         config.fs_prefetch.prefetch_all = true;
         assert!(BlobPrefetchConfig::try_from(&config).is_ok());
```
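The prefetch merging size is now capped by the global maximum chunk size rather than the default. A minimal standalone sketch of the new bound (constants copied from storage/src/lib.rs; `validate_merging_size` is a hypothetical helper, not the crate's API):

```rust
/// Constants as defined in storage/src/lib.rs after this commit.
const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024; // 1MB
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16; // 16MB

/// Hypothetical helper mirroring the bound in BlobPrefetchConfig::try_from.
fn validate_merging_size(merging_size: usize) -> Result<(), String> {
    if merging_size as u64 > RAFS_MAX_CHUNK_SIZE {
        return Err("merging size can't exceed max chunk size".to_string());
    }
    Ok(())
}

fn main() {
    // The 1MB default is comfortably within the new 16MB bound ...
    assert!(validate_merging_size(RAFS_DEFAULT_CHUNK_SIZE as usize).is_ok());
    // ... while anything above the maximum is rejected.
    assert!(validate_merging_size(RAFS_MAX_CHUNK_SIZE as usize + 1).is_err());
}
```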

rafs/src/metadata/layout/v6.rs (10 additions, 2 deletions)

```diff
@@ -17,6 +17,7 @@ use lazy_static::lazy_static;
 use nydus_utils::{compress, digest, round_up, ByteSize};
 use storage::device::{BlobFeatures, BlobInfo};
 use storage::meta::{BlobMetaHeaderOndisk, BLOB_FEATURE_4K_ALIGNED};
+use storage::RAFS_MAX_CHUNK_SIZE;
 
 use crate::metadata::{layout::RafsXAttrs, RafsStore, RafsSuperFlags};
 use crate::{impl_bootstrap_converter, impl_pub_getter_setter, RafsIoReader, RafsIoWrite};
@@ -352,7 +353,10 @@ impl RafsV6SuperBlockExt {
         }
 
         let chunk_size = u32::from_le(self.s_chunk_size) as u64;
-        if !chunk_size.is_power_of_two() || chunk_size < EROFS_BLOCK_SIZE {
+        if !chunk_size.is_power_of_two()
+            || chunk_size < EROFS_BLOCK_SIZE
+            || chunk_size > RAFS_MAX_CHUNK_SIZE
+        {
             return Err(einval!("invalid chunk size in Rafs v6 extended superblock"));
         }
 
@@ -1292,7 +1296,11 @@ impl RafsV6Blob {
         }
 
         let c_size = u32::from_le(self.chunk_size) as u64;
-        if c_size.count_ones() != 1 || c_size < EROFS_BLOCK_SIZE || c_size != chunk_size as u64 {
+        if c_size.count_ones() != 1
+            || c_size < EROFS_BLOCK_SIZE
+            || c_size > RAFS_MAX_CHUNK_SIZE
+            || c_size != chunk_size as u64
+        {
             error!(
                 "RafsV6Blob: idx {} invalid c_size {}, count_ones() {}",
                 blob_index,
```
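Both the extended-superblock and per-blob checks now enforce the same invariant: the chunk size must be a power of two between the EROFS block size and RAFS_MAX_CHUNK_SIZE. A self-contained sketch of that predicate, assuming EROFS_BLOCK_SIZE is 4096 as in the EROFS on-disk format:

```rust
/// Assumed EROFS block size (4KB); RAFS_MAX_CHUNK_SIZE as in storage/src/lib.rs.
const EROFS_BLOCK_SIZE: u64 = 4096;
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16;

/// A chunk size is valid iff it is a power of two in
/// [EROFS_BLOCK_SIZE, RAFS_MAX_CHUNK_SIZE].
fn is_valid_chunk_size(chunk_size: u64) -> bool {
    chunk_size.is_power_of_two()
        && chunk_size >= EROFS_BLOCK_SIZE
        && chunk_size <= RAFS_MAX_CHUNK_SIZE
}

fn main() {
    assert!(is_valid_chunk_size(0x100000)); // 1MB, the default
    assert!(is_valid_chunk_size(0x1000000)); // 16MB, the new maximum
    assert!(!is_valid_chunk_size(0x2000000)); // 32MB, above the maximum
    assert!(!is_valid_chunk_size(0x180000)); // 1.5MB, not a power of two
}
```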

src/bin/nydus-image/main.rs (5 additions, 2 deletions)

```diff
@@ -29,7 +29,7 @@ use nydus_api::http::BackendConfig;
 use nydus_app::{setup_logging, BuildTimeInfo};
 use nydus_rafs::RafsIoReader;
 use nydus_storage::factory::BlobFactory;
-use nydus_storage::RAFS_DEFAULT_CHUNK_SIZE;
+use nydus_storage::{RAFS_DEFAULT_CHUNK_SIZE, RAFS_MAX_CHUNK_SIZE};
 use nydus_utils::{compress, digest};
 
 use crate::builder::{Builder, DiffBuilder, DirectoryBuilder, StargzBuilder};
@@ -967,7 +967,10 @@ impl Command {
         let param = v.trim_start_matches("0x").trim_end_matches("0X");
         let chunk_size =
             u32::from_str_radix(param, 16).context(format!("invalid chunk size {}", v))?;
-        if chunk_size < 0x1000 || !chunk_size.is_power_of_two() {
+        if chunk_size as u64 > RAFS_MAX_CHUNK_SIZE
+            || chunk_size < 0x1000
+            || !chunk_size.is_power_of_two()
+        {
             bail!("invalid chunk size: {}", chunk_size);
         }
         Ok(chunk_size)
```
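nydus-image parses the chunk-size argument as hexadecimal and then bounds-checks it. A sketch of that parse-and-validate path with plain String errors in place of anyhow (`parse_chunk_size` is named for illustration):

```rust
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16;

/// Parse a hex chunk-size argument such as "0x100000" and bounds-check it,
/// following the same rules as the builder's validator.
fn parse_chunk_size(v: &str) -> Result<u32, String> {
    let param = v.trim_start_matches("0x").trim_end_matches("0X");
    let chunk_size =
        u32::from_str_radix(param, 16).map_err(|_| format!("invalid chunk size {}", v))?;
    if chunk_size as u64 > RAFS_MAX_CHUNK_SIZE
        || chunk_size < 0x1000
        || !chunk_size.is_power_of_two()
    {
        return Err(format!("invalid chunk size: {}", chunk_size));
    }
    Ok(chunk_size)
}

fn main() {
    assert_eq!(parse_chunk_size("0x100000"), Ok(0x100000)); // 1MB is accepted
    assert!(parse_chunk_size("0x800").is_err()); // below the 4KB floor
    assert!(parse_chunk_size("0x2000000").is_err()); // above the 16MB ceiling
}
```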

src/bin/nydusd/fs_cache.rs (10 additions, 1 deletion)

```diff
@@ -584,7 +584,16 @@ impl FsCacheHandler {
             }
             Some(obj) => match obj.fetch_range_uncompressed(msg.off, msg.len) {
                 Ok(v) if v == msg.len as usize => {}
-                _ => debug!("fscache: failed to read data from blob object"),
+                Ok(v) => {
+                    warn!(
+                        "fscache: read data from blob object not matched: {} != {}",
+                        v, msg.len
+                    );
+                }
+                Err(e) => error!(
+                    "{}",
+                    format!("fscache: failed to read data from blob object: {}", e,)
+                ),
             },
         }
     }
```
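The catch-all `_` arm is split so that a short read and a hard error are reported separately. A minimal sketch of the same match shape, with `eprintln!` standing in for the `warn!`/`error!` log macros and a stub in place of the blob object:

```rust
use std::io::Error;

/// Stub standing in for obj.fetch_range_uncompressed(off, len); it returns
/// the number of bytes actually read, here simulating a short read.
fn fetch_range_uncompressed(_off: u64, len: u64) -> Result<usize, Error> {
    Ok(len as usize / 2)
}

fn main() {
    let len: u64 = 4096;
    match fetch_range_uncompressed(0, len) {
        // Full read: nothing to report.
        Ok(v) if v == len as usize => {}
        // Short read: report both actual and expected lengths.
        Ok(v) => eprintln!("fscache: read from blob object not matched: {} != {}", v, len),
        // Hard failure: surface the underlying error.
        Err(e) => eprintln!("fscache: failed to read data from blob object: {}", e),
    }
}
```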

storage/src/cache/cachedfile.rs (28 additions, 0 deletions)

```diff
@@ -359,6 +359,34 @@ impl BlobObject for FileCacheEntry {
 
 impl FileCacheEntry {
     fn do_fetch_chunks(&self, chunks: &[BlobIoChunk]) -> Result<usize> {
+        if self.is_stargz() {
+            // FIXME: for stargz, we need to implement fetching multiple chunks. here
+            // is a heavy overhead workaround, needs to be optimized.
+            for chunk in chunks {
+                let mut buf = alloc_buf(chunk.uncompress_size() as usize);
+                self.read_raw_chunk(chunk, &mut buf, false, None)
+                    .map_err(|e| {
+                        eio!(format!(
+                            "read_raw_chunk failed to read and decompress stargz chunk, {:?}",
+                            e
+                        ))
+                    })?;
+                if self.dio_enabled {
+                    self.adjust_buffer_for_dio(&mut buf)
+                }
+                Self::persist_chunk(&self.file, chunk.uncompress_offset(), &buf).map_err(|e| {
+                    eio!(format!(
+                        "do_fetch_chunk failed to persist stargz chunk, {:?}",
+                        e
+                    ))
+                })?;
+                self.chunk_map
+                    .set_ready_and_clear_pending(chunk.as_base())
+                    .unwrap_or_else(|e| error!("set stargz chunk ready failed, {}", e));
+            }
+            return Ok(0);
+        }
+
         debug_assert!(!chunks.is_empty());
         let bitmap = self
             .chunk_map
```
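The workaround handles chunks one at a time: read and decompress, persist at the uncompressed offset, then mark the chunk ready. A toy sketch of that loop shape over an in-memory cache file (all types and names here are illustrative, not the crate's API):

```rust
/// Illustrative chunk: an uncompressed offset plus already-decompressed bytes.
struct Chunk {
    uncompressed_offset: u64,
    data: Vec<u8>,
}

/// Stand-in for persisting one chunk into the cache file at its
/// uncompressed offset (a Vec<u8> models the file here).
fn persist_chunk(file: &mut Vec<u8>, offset: u64, buf: &[u8]) {
    let end = offset as usize + buf.len();
    if file.len() < end {
        file.resize(end, 0);
    }
    file[offset as usize..end].copy_from_slice(buf);
}

fn main() {
    let chunks = vec![
        Chunk { uncompressed_offset: 0, data: vec![1; 4] },
        Chunk { uncompressed_offset: 4, data: vec![2; 4] },
    ];
    let mut cache_file = Vec::new();
    let mut ready = vec![false; chunks.len()];

    // One chunk per iteration: the heavy-overhead shape the commit's
    // FIXME wants replaced with a batched multi-chunk fetch.
    for (i, c) in chunks.iter().enumerate() {
        persist_chunk(&mut cache_file, c.uncompressed_offset, &c.data);
        ready[i] = true; // models chunk_map.set_ready_and_clear_pending()
    }

    assert_eq!(cache_file, vec![1, 1, 1, 1, 2, 2, 2, 2]);
    assert!(ready.iter().all(|&r| r));
}
```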

storage/src/cache/fscache/mod.rs (1 addition, 4 deletions)

```diff
@@ -165,9 +165,6 @@ impl FileCacheEntry {
         if blob_info.has_feature(BlobFeatures::V5_NO_EXT_BLOB_TABLE) {
             return Err(einval!("fscache does not support Rafs v5 blobs"));
         }
-        if blob_info.is_stargz() {
-            return Err(einval!("fscache does not support stargz blob file"));
-        }
         let file = blob_info
             .get_fscache_file()
             .ok_or_else(|| einval!("No fscache file associated with the blob_info"))?;
@@ -210,7 +207,7 @@ impl FileCacheEntry {
             is_get_blob_object_supported: true,
             is_compressed: false,
             is_direct_chunkmap: true,
-            is_stargz: false,
+            is_stargz: blob_info.is_stargz(),
             dio_enabled: true,
             need_validate: mgr.validate,
             prefetch_config,
```

storage/src/cache/worker.rs (3 additions, 3 deletions)

```diff
@@ -422,8 +422,8 @@ mod tests {
         let config = Arc::new(AsyncPrefetchConfig {
             enable: true,
             threads_count: 4,
-            merging_size: 0x100000,
-            bandwidth_rate: 0x100000,
+            merging_size: 0x1000000,
+            bandwidth_rate: 0x1000000,
         });
 
         let mgr = Arc::new(AsyncWorkerMgr::new(metrics, config).unwrap());
@@ -444,7 +444,7 @@
         assert_eq!(mgr.prefetch_inflight.load(Ordering::Acquire), 0);
 
         assert!(mgr
-            .send_prefetch_message(AsyncPrefetchMessage::RateLimiter(0x100001))
+            .send_prefetch_message(AsyncPrefetchMessage::RateLimiter(0x1000001))
            .is_ok());
         assert!(mgr
             .send_prefetch_message(AsyncPrefetchMessage::RateLimiter(u64::MAX))
```

storage/src/lib.rs (2 additions, 2 deletions)

```diff
@@ -72,8 +72,8 @@ macro_rules! impl_getter {
 
 /// Default blob chunk size.
 pub const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024;
-/// Maximum blob chunk size.
-pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024;
+/// Maximum blob chunk size, 16MB.
+pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16;
 
 /// Error codes related to storage subsystem.
 #[derive(Debug)]
```
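With the maximum raised to 16MB, the 1MB default must remain within it. A small sketch that pins the invariant at compile time (const assertions of this form work on reasonably recent stable Rust):

```rust
pub const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024; // 1MB
pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16; // 16MB

// Compile-time check: the build fails if the default ever exceeds the maximum.
const _: () = assert!(RAFS_DEFAULT_CHUNK_SIZE <= RAFS_MAX_CHUNK_SIZE);

fn main() {
    println!(
        "default = 0x{:x}, max = 0x{:x}",
        RAFS_DEFAULT_CHUNK_SIZE, RAFS_MAX_CHUNK_SIZE
    );
}
```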

storage/src/meta/mod.rs (29 additions, 2 deletions)

```diff
@@ -407,6 +407,7 @@ impl BlobMetaInfo {
             chunks: chunk_infos,
             base: base as *const u8,
             unmap_len: expected_size,
+            is_stargz: blob_info.is_stargz(),
         });
 
         Ok(BlobMetaInfo { state })
@@ -465,7 +466,9 @@ impl BlobMetaInfo {
             index += 1;
             let entry = &infos[index];
             self.validate_chunk(entry)?;
-            if entry.uncompressed_offset() != last_end {
+
+            // For stargz chunks, disable this check.
+            if !self.state.is_stargz && entry.uncompressed_offset() != last_end {
                 return Err(einval!(format!(
                     "mismatch uncompressed {} size {} last_end {}",
                     entry.uncompressed_offset(),
@@ -562,7 +565,8 @@ impl BlobMetaInfo {
 
     #[inline]
     fn validate_chunk(&self, entry: &BlobChunkInfoOndisk) -> Result<()> {
-        if entry.compressed_end() > self.state.compressed_size
+        // For stargz blob, self.state.compressed_size == 0, so don't validate it.
+        if (!self.state.is_stargz && entry.compressed_end() > self.state.compressed_size)
            || entry.uncompressed_end() > self.state.uncompressed_size
        {
            Err(einval!())
@@ -646,6 +650,8 @@ pub struct BlobMetaState {
     chunks: ManuallyDrop<Vec<BlobChunkInfoOndisk>>,
     base: *const u8,
     unmap_len: usize,
+    /// The blob meta is for an stargz image.
+    is_stargz: bool,
 }
 
 // Safe to Send/Sync because the underlying data structures are readonly
@@ -671,6 +677,25 @@ impl BlobMetaState {
         let mut start = 0;
         let mut end = 0;
 
+        if self.is_stargz {
+            // FIXME: since stargz chunks are not currently allocated chunk index in the order of uncompressed_offset,
+            // a binary search is not available for now, here is a heavy overhead workaround, need to be fixed.
+            for i in 0..self.chunk_count {
+                let off = if compressed {
+                    chunks[i as usize].compressed_offset()
+                } else {
+                    chunks[i as usize].uncompressed_offset()
+                };
+                if addr == off {
+                    return Ok(i as usize);
+                }
+            }
+            return Err(einval!(format!(
+                "can't find stargz chunk by offset {}",
+                addr,
+            )));
+        }
+
         while left < right {
             let mid = left + size / 2;
             // SAFETY: the call is made safe by the following invariants:
@@ -799,6 +824,7 @@ mod tests {
             ]),
             base: std::ptr::null(),
             unmap_len: 0,
+            is_stargz: false,
         };
 
         assert_eq!(state.get_chunk_index_nocheck(0, false).unwrap(), 0);
@@ -883,6 +909,7 @@ mod tests {
             ]),
             base: std::ptr::null(),
             unmap_len: 0,
+            is_stargz: false,
         };
         let info = BlobMetaInfo {
             state: Arc::new(state),
```
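Since stargz chunk indices are not ordered by uncompressed offset, the lookup falls back from binary search to an O(n) scan. A standalone sketch contrasting the two strategies over a plain offset table (names are illustrative):

```rust
/// O(n) scan: the only option when chunk index order does not follow
/// the offsets, as with stargz blobs.
fn find_linear(offsets: &[u64], addr: u64) -> Option<usize> {
    offsets.iter().position(|&off| off == addr)
}

/// O(log n) lookup: valid only when offsets are sorted by index,
/// as with regular rafs blobs.
fn find_binary(sorted_offsets: &[u64], addr: u64) -> Option<usize> {
    sorted_offsets.binary_search(&addr).ok()
}

fn main() {
    // stargz-like table: offsets out of index order, binary search would misfire.
    let stargz_offsets = [0x4000, 0x0, 0x9000, 0x2000];
    assert_eq!(find_linear(&stargz_offsets, 0x9000), Some(2));

    // regular table: monotonically increasing offsets.
    let sorted_offsets = [0x0, 0x2000, 0x4000, 0x9000];
    assert_eq!(find_binary(&sorted_offsets, 0x9000), Some(3));
}
```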
