From a84798beb506816e58909c88a1af60dbaa10d4eb Mon Sep 17 00:00:00 2001 From: Oussama Saoudi Date: Tue, 18 Nov 2025 14:56:43 -0800 Subject: [PATCH 1/6] commit reader --- kernel/src/lib.rs | 1 + kernel/src/log_reader/commit.rs | 131 ++++++++++++++++++++++++++++++++ kernel/src/log_reader/mod.rs | 1 + kernel/src/log_segment.rs | 13 +--- kernel/src/scan/log_replay.rs | 2 +- 5 files changed, 137 insertions(+), 11 deletions(-) create mode 100644 kernel/src/log_reader/commit.rs create mode 100644 kernel/src/log_reader/mod.rs diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 0f611cfbc..205fb0e9a 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -93,6 +93,7 @@ pub mod error; pub mod expressions; mod log_compaction; mod log_path; +mod log_reader; pub mod scan; pub mod schema; pub mod snapshot; diff --git a/kernel/src/log_reader/commit.rs b/kernel/src/log_reader/commit.rs new file mode 100644 index 000000000..e0d66c895 --- /dev/null +++ b/kernel/src/log_reader/commit.rs @@ -0,0 +1,131 @@ +//! Commit phase for log replay - processes JSON commit files. + +use std::sync::Arc; + +use crate::actions::{get_commit_schema, ADD_NAME, REMOVE_NAME}; +use crate::log_replay::ActionsBatch; +use crate::log_segment::LogSegment; +use crate::schema::SchemaRef; +use crate::{DeltaResult, Engine}; + +/// Phase that processes JSON commit files. +pub(crate) struct CommitReader { + actions: Box> + Send>, +} + +impl CommitReader { + /// Create a new commit phase from a log segment. + /// + /// # Parameters + /// - `log_segment`: The log segment to process + /// - `engine`: Engine for reading files + pub(crate) fn try_new( + engine: &dyn Engine, + log_segment: &LogSegment, + schema: SchemaRef, + ) -> DeltaResult { + let commit_files = log_segment.find_commit_cover(); + let actions = engine + .json_handler() + .read_json_files(&commit_files, schema, None)? 
+ .map(|batch| batch.map(|b| ActionsBatch::new(b, true))); + + Ok(Self { + actions: Box::new(actions), + }) + } +} + +impl Iterator for CommitReader { + type Item = DeltaResult; + + fn next(&mut self) -> Option { + self.actions.next() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::engine::default::executor::tokio::TokioBackgroundExecutor; + use crate::engine::default::DefaultEngine; + use crate::log_replay::LogReplayProcessor; + use crate::scan::log_replay::ScanLogReplayProcessor; + use crate::scan::state_info::StateInfo; + use object_store::local::LocalFileSystem; + use std::path::PathBuf; + use std::sync::Arc as StdArc; + + fn load_test_table( + table_name: &str, + ) -> DeltaResult<( + StdArc>, + StdArc, + url::Url, + )> { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests/data"); + path.push(table_name); + + let path = std::fs::canonicalize(path) + .map_err(|e| crate::Error::Generic(format!("Failed to canonicalize path: {}", e)))?; + + let url = url::Url::from_directory_path(path) + .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?; + + let store = StdArc::new(LocalFileSystem::new()); + let engine = StdArc::new(DefaultEngine::new(store)); + let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?; + + Ok((engine, snapshot, url)) + } + + #[test] + fn test_commit_phase_processes_commits() -> DeltaResult<()> { + let (engine, snapshot, _url) = load_test_table("table-without-dv-small")?; + let log_segment = StdArc::new(snapshot.log_segment().clone()); + + let state_info = StdArc::new(StateInfo::try_new( + snapshot.schema(), + snapshot.table_configuration(), + None, + (), + )?); + + let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?; + let mut commit_phase = CommitPhase::try_new(&log_segment, engine.clone())?; + + let mut batch_count = 0; + let mut file_paths = Vec::new(); + + for result in commit_phase { + let batch = result?; + let metadata = processor.process_actions_batch(batch)?; + let paths = metadata.visit_scan_files( + vec![], + |ps: &mut Vec, path, _, _, _, _, _| { + ps.push(path.to_string()); + }, + )?; + file_paths.extend(paths); + batch_count += 1; + } + + // table-without-dv-small has exactly 1 commit file + assert_eq!( + batch_count, 1, + "table-without-dv-small should have exactly 1 commit batch" + ); + + // table-without-dv-small has exactly 1 add file + file_paths.sort(); + let expected_files = + vec!["part-00000-517f5d32-9c95-48e8-82b4-0229cc194867-c000.snappy.parquet"]; + assert_eq!( + file_paths, expected_files, + "CommitPhase should find exactly the expected file" + ); + + Ok(()) + } +} diff --git a/kernel/src/log_reader/mod.rs b/kernel/src/log_reader/mod.rs new file mode 100644 index 000000000..d1337ef59 --- /dev/null +++ b/kernel/src/log_reader/mod.rs @@ -0,0 +1 @@ +pub(crate) mod commit; diff --git a/kernel/src/log_segment.rs b/kernel/src/log_segment.rs index 4eb5e1ee7..e6ef5fd41 100644 --- a/kernel/src/log_segment.rs +++ b/kernel/src/log_segment.rs @@ -9,6 +9,7 @@ use crate::actions::{ PROTOCOL_NAME, SIDECAR_NAME, }; use crate::last_checkpoint_hint::LastCheckpointHint; +use crate::log_reader::commit::CommitReader; use crate::log_replay::ActionsBatch; use crate::path::{LogPathFileType, ParsedLogPath}; use crate::schema::{SchemaRef, StructField, ToSchema as _}; @@ -303,15 +304,7 @@ impl LogSegment { meta_predicate: Option, ) -> DeltaResult> + Send> { // `replay` expects commit files to be sorted in descending order, so the return value here 
is correct - let commits_and_compactions = self.find_commit_cover(); - let commit_stream = engine - .json_handler() - .read_json_files( - &commits_and_compactions, - commit_read_schema, - meta_predicate.clone(), - )? - .map_ok(|batch| ActionsBatch::new(batch, true)); + let commit_stream = CommitReader::try_new(engine, self, commit_read_schema)?; let checkpoint_stream = self.create_checkpoint_stream(engine, checkpoint_read_schema, meta_predicate)?; @@ -340,7 +333,7 @@ impl LogSegment { /// returns files is DESCENDING ORDER, as that's what `replay` expects. This function assumes /// that all files in `self.ascending_commit_files` and `self.ascending_compaction_files` are in /// range for this log segment. This invariant is maintained by our listing code. - fn find_commit_cover(&self) -> Vec { + pub(crate) fn find_commit_cover(&self) -> Vec { // Create an iterator sorted in ascending order by (initial version, end version), e.g. // [00.json, 00.09.compacted.json, 00.99.compacted.json, 01.json, 02.json, ..., 10.json, // 10.19.compacted.json, 11.json, ...] diff --git a/kernel/src/scan/log_replay.rs b/kernel/src/scan/log_replay.rs index 7bf69eb72..e0fe51b3f 100644 --- a/kernel/src/scan/log_replay.rs +++ b/kernel/src/scan/log_replay.rs @@ -53,7 +53,7 @@ pub(crate) struct ScanLogReplayProcessor { impl ScanLogReplayProcessor { /// Create a new [`ScanLogReplayProcessor`] instance - fn new(engine: &dyn Engine, state_info: Arc) -> DeltaResult { + pub(crate) fn new(engine: &dyn Engine, state_info: Arc) -> DeltaResult { // Extract the physical predicate from StateInfo's PhysicalPredicate enum. // The DataSkippingFilter and partition_filter components expect the predicate // in the format Option<(PredicateRef, SchemaRef)>, so we need to convert from From 07824d35761a15c556b33e435486fea9be8e90fa Mon Sep 17 00:00:00 2001 From: Oussama Saoudi Date: Tue, 18 Nov 2025 15:46:59 -0800 Subject: [PATCH 2/6] improve commit --- kernel/src/log_reader/commit.rs | 6 ++++-- kernel/src/scan/mod.rs | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/kernel/src/log_reader/commit.rs b/kernel/src/log_reader/commit.rs index e0d66c895..17ac94bf3 100644 --- a/kernel/src/log_reader/commit.rs +++ b/kernel/src/log_reader/commit.rs @@ -52,6 +52,7 @@ mod tests { use crate::log_replay::LogReplayProcessor; use crate::scan::log_replay::ScanLogReplayProcessor; use crate::scan::state_info::StateInfo; + use crate::scan::COMMIT_READ_SCHEMA; use object_store::local::LocalFileSystem; use std::path::PathBuf; use std::sync::Arc as StdArc; @@ -93,7 +94,8 @@ mod tests { )?); let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?; - let mut commit_phase = CommitPhase::try_new(&log_segment, engine.clone())?; + let schema = COMMIT_READ_SCHEMA.clone(); + let mut commit_phase = CommitReader::try_new(engine.as_ref(), &log_segment, schema)?; let mut batch_count = 0; let mut file_paths = Vec::new(); @@ -123,7 +125,7 @@ mod tests { vec!["part-00000-517f5d32-9c95-48e8-82b4-0229cc194867-c000.snappy.parquet"]; assert_eq!( file_paths, expected_files, - "CommitPhase should find exactly the expected file" + "CommitReader should find exactly the expected file" ); Ok(()) diff --git a/kernel/src/scan/mod.rs b/kernel/src/scan/mod.rs index c571c346f..fdefff7b4 100644 --- a/kernel/src/scan/mod.rs +++ b/kernel/src/scan/mod.rs @@ -41,7 +41,7 @@ pub(crate) mod state_info; // safety: we define get_commit_schema() and _know_ it contains ADD_NAME and REMOVE_NAME #[allow(clippy::unwrap_used)] -static COMMIT_READ_SCHEMA: LazyLock = 
LazyLock::new(|| { +pub(crate) static COMMIT_READ_SCHEMA: LazyLock = LazyLock::new(|| { get_commit_schema() .project(&[ADD_NAME, REMOVE_NAME]) .unwrap() From 23bf703d6a5bec133518fca2078c66120aaad580 Mon Sep 17 00:00:00 2001 From: Oussama Saoudi Date: Tue, 18 Nov 2025 15:20:45 -0800 Subject: [PATCH 3/6] manifest v1 --- kernel/src/log_reader/manifest.rs | 289 ++++++++++++++++++++++++++++++ kernel/src/log_reader/mod.rs | 1 + 2 files changed, 290 insertions(+) create mode 100644 kernel/src/log_reader/manifest.rs diff --git a/kernel/src/log_reader/manifest.rs b/kernel/src/log_reader/manifest.rs new file mode 100644 index 000000000..b94e29a47 --- /dev/null +++ b/kernel/src/log_reader/manifest.rs @@ -0,0 +1,289 @@ +//! Manifest phase for log replay - processes single-part checkpoint manifest files. + +use std::sync::Arc; + +use url::Url; + +use crate::actions::Sidecar; +use crate::actions::{get_all_actions_schema, visitors::SidecarVisitor, SIDECAR_NAME}; +use crate::expressions::Transform; +use crate::log_replay::ActionsBatch; +use crate::schema::{Schema, SchemaRef, StructField, ToSchema}; +use crate::{DeltaResult, Engine, Error, Expression, ExpressionEvaluator, FileMeta, RowVisitor}; + +/// Phase that processes single-part checkpoint manifest files. +/// +/// Extracts sidecar references while processing the manifest. +pub(crate) struct ManifestPhase { + actions: Box> + Send>, + sidecar_visitor: SidecarVisitor, + original_schema: SchemaRef, + log_root: Url, +} + +/// Possible transitions after ManifestPhase completes. +pub(crate) enum AfterManifest { + /// Has sidecars → return sidecar files + Sidecars { sidecars: Vec }, + /// No sidecars + Done, +} + +impl ManifestPhase { + /// Create a new manifest phase for a single-part checkpoint. + /// + /// The schema is automatically augmented with the sidecar column since the manifest + /// phase needs to extract sidecar references for phase transitions. + /// + /// # Parameters + /// - `manifest_file`: The checkpoint manifest file to process + /// - `log_root`: Root URL for resolving sidecar paths + /// - `engine`: Engine for reading files + /// - `base_schema`: Schema columns required by the processor (will be augmented with sidecar) + pub fn new( + manifest_file: FileMeta, + log_root: Url, + engine: Arc, + ) -> DeltaResult { + let files = vec![manifest_file.clone()]; + + // Determine file type from extension + let extension = manifest_file + .location + .path() + .rsplit('.') + .next() + .unwrap_or(""); + + let actions = match extension { + "json" => { + engine + .json_handler() + .read_json_files(&files, sidecar_schema.clone(), None)? + } + "parquet" => { + engine + .parquet_handler() + .read_parquet_files(&files, sidecar_schema.clone(), None)? + } + ext => { + return Err(Error::generic(format!( + "Unsupported checkpoint extension: {}", + ext + ))) + } + }; + + let actions = actions.map(|batch| batch.map(|b| ActionsBatch::new(b, false))); + + Ok(Self { + actions: Box::new(actions), + sidecar_visitor: SidecarVisitor::default(), + log_root, + original_schema, + projector, + }) + } + + /// Transition to the next phase. + /// + /// Returns an enum indicating what comes next: + /// - `Sidecars`: Extracted sidecar files + /// - `Done`: No sidecars found + pub(crate) fn finalize(self) -> DeltaResult { + // TODO: Check that stream is exhausted. We can track a boolean flag on whether we saw a None yet. 
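One way to implement the check this TODO describes is the boolean-flag approach the TODO itself suggests (and which a later patch in this series adopts as `is_complete`): have `next()` record when the inner iterator returns `None`, and have the finalize step refuse to run until then. A minimal, generic sketch; every name here is illustrative and not part of this patch:

    /// Wrapper that records when the inner iterator has been exhausted, so a
    /// finalize-style method can refuse to run early. Illustration only.
    struct Exhaustible<I: Iterator> {
        inner: I,
        is_complete: bool,
    }

    impl<I: Iterator> Exhaustible<I> {
        fn new(inner: I) -> Self {
            Self {
                inner,
                is_complete: false,
            }
        }

        /// Analogue of a finalize step: only meaningful once the stream is drained.
        fn finalize(self) -> Result<(), String> {
            if !self.is_complete {
                return Err("finalize called before the stream was exhausted".to_string());
            }
            Ok(())
        }
    }

    impl<I: Iterator> Iterator for Exhaustible<I> {
        type Item = I::Item;

        fn next(&mut self) -> Option<Self::Item> {
            let item = self.inner.next();
            if item.is_none() {
                // Flip the flag once the underlying iterator reports exhaustion.
                self.is_complete = true;
            }
            item
        }
    }

Draining the iterator with `while phase.next().is_some() {}` before calling `finalize` then mirrors how the tests later in this series drive `ManifestPhase`.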
+ let sidecars = self + .sidecar_visitor + .sidecars + .into_iter() + .map(|s| s.to_filemeta(&self.log_root)) + .collect::, _>>()?; + + if sidecars.is_empty() { + Ok(AfterManifest::Done) + } else { + Ok(AfterManifest::Sidecars { sidecars }) + } + } +} + +impl Iterator for ManifestPhase { + type Item = DeltaResult; + + fn next(&mut self) -> Option { + self.actions.next().map(|batch_result| { + batch_result.and_then(|batch| { + // Extract sidecar references from the batch + self.sidecar_visitor.visit_rows_of(batch.actions())?; + + // Return the batch + // TODO: un-select sidecar actions + // TODO: project out sidecar actions + let batch = self.projector.evaluate(batch); + Ok(batch) + }) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::engine::default::executor::tokio::TokioBackgroundExecutor; + use crate::engine::default::DefaultEngine; + use crate::log_replay::LogReplayProcessor; + use crate::scan::log_replay::ScanLogReplayProcessor; + use crate::scan::state_info::StateInfo; + use object_store::local::LocalFileSystem; + use std::path::PathBuf; + use std::sync::Arc as StdArc; + + fn load_test_table( + table_name: &str, + ) -> DeltaResult<( + StdArc>, + StdArc, + url::Url, + )> { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests/data"); + path.push(table_name); + + let path = std::fs::canonicalize(path) + .map_err(|e| crate::Error::Generic(format!("Failed to canonicalize path: {}", e)))?; + + let url = url::Url::from_directory_path(path) + .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?; + + let store = StdArc::new(LocalFileSystem::new()); + let engine = StdArc::new(DefaultEngine::new(store)); + let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?; + + Ok((engine, snapshot, url)) + } + + #[test] + fn test_manifest_phase_with_checkpoint() -> DeltaResult<()> { + // Use a table with v2 checkpoints where adds might be in sidecars + let (engine, snapshot, log_root) = load_test_table("v2-checkpoints-json-with-sidecars")?; + let log_segment = snapshot.log_segment(); + + // Check if there are any checkpoint parts + if log_segment.checkpoint_parts.is_empty() { + println!("Test table has no checkpoint parts, skipping"); + return Ok(()); + } + + let state_info = StdArc::new(StateInfo::try_new( + snapshot.schema(), + snapshot.table_configuration(), + None, + (), + )?); + + let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?; + + // Get the first checkpoint part + let checkpoint_file = &log_segment.checkpoint_parts[0]; + let manifest_file = checkpoint_file.location.clone(); + + let schema = crate::actions::get_commit_schema().project(&[crate::actions::ADD_NAME])?; + + let mut manifest_phase = + ManifestPhase::new(manifest_file, log_root.clone(), engine.clone(), schema)?; + + // Count batches and collect results + let mut batch_count = 0; + let mut file_paths = Vec::new(); + + while let Some(result) = manifest_phase.next() { + let batch = result?; + let metadata = processor.process_actions_batch(batch)?; + let paths = metadata.visit_scan_files( + vec![], + |ps: &mut Vec, path, _, _, _, _, _| { + ps.push(path.to_string()); + }, + )?; + file_paths.extend(paths); + batch_count += 1; + } + + // For v2 checkpoints with sidecars, the manifest might not contain adds directly. + // In this test table, all adds are in sidecars, so manifest should be empty. 
+ assert_eq!( + batch_count, 1, + "Single manifest file should produce exactly 1 batch" + ); + + // Verify the manifest itself contains no add files (they're all in sidecars) + file_paths.sort(); + assert_eq!( + file_paths.len(), 0, + "For this v2 checkpoint with sidecars, manifest should contain 0 add files (all in sidecars)" + ); + + Ok(()) + } + + #[test] + fn test_manifest_phase_collects_sidecars() -> DeltaResult<()> { + let (engine, snapshot, log_root) = load_test_table("v2-checkpoints-json-with-sidecars")?; + let log_segment = snapshot.log_segment(); + + if log_segment.checkpoint_parts.is_empty() { + println!("Test table has no checkpoint parts, skipping"); + return Ok(()); + } + + let checkpoint_file = &log_segment.checkpoint_parts[0]; + let manifest_file = checkpoint_file.location.clone(); + + let schema = crate::actions::get_commit_schema().project(&[crate::actions::ADD_NAME])?; + + let mut manifest_phase = + ManifestPhase::new(manifest_file, log_root.clone(), engine.clone(), schema)?; + + // Drain the phase + while manifest_phase.next().is_some() {} + + // Check if sidecars were collected + let next = manifest_phase.into_next()?; + + match next { + AfterManifest::Sidecars { sidecars } => { + // For the v2-checkpoints-json-with-sidecars test table at version 6, + // there are exactly 2 sidecar files + assert_eq!( + sidecars.len(), + 2, + "Should collect exactly 2 sidecars for checkpoint at version 6" + ); + + // Extract and verify the sidecar paths + let mut collected_paths: Vec = sidecars + .iter() + .map(|fm| { + // Get the filename from the URL path + fm.location + .path_segments() + .and_then(|segments| segments.last()) + .unwrap_or("") + .to_string() + }) + .collect(); + + collected_paths.sort(); + + // Verify they're the expected sidecar files for version 6 + assert_eq!(collected_paths[0], "00000000000000000006.checkpoint.0000000001.0000000002.19af1366-a425-47f4-8fa6-8d6865625573.parquet"); + assert_eq!(collected_paths[1], "00000000000000000006.checkpoint.0000000002.0000000002.5008b69f-aa8a-4a66-9299-0733a56a7e63.parquet"); + } + AfterManifest::Done => { + panic!("Expected sidecars for v2-checkpoints-json-with-sidecars table"); + } + } + + Ok(()) + } +} diff --git a/kernel/src/log_reader/mod.rs b/kernel/src/log_reader/mod.rs index d1337ef59..1ae42a4ba 100644 --- a/kernel/src/log_reader/mod.rs +++ b/kernel/src/log_reader/mod.rs @@ -1 +1,2 @@ pub(crate) mod commit; +pub(crate) mod manifest; From 8abf164156216132895275dd866fdfbfc7a87067 Mon Sep 17 00:00:00 2001 From: Oussama Saoudi Date: Tue, 18 Nov 2025 15:38:15 -0800 Subject: [PATCH 4/6] manifest --- kernel/src/log_reader/manifest.rs | 70 ++++++++++++++++++------------- kernel/src/scan/mod.rs | 2 +- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/kernel/src/log_reader/manifest.rs b/kernel/src/log_reader/manifest.rs index b94e29a47..9e83472fe 100644 --- a/kernel/src/log_reader/manifest.rs +++ b/kernel/src/log_reader/manifest.rs @@ -1,14 +1,16 @@ //! Manifest phase for log replay - processes single-part checkpoint manifest files. 
-use std::sync::Arc; +use std::sync::{Arc, LazyLock}; +use itertools::Itertools; use url::Url; -use crate::actions::Sidecar; use crate::actions::{get_all_actions_schema, visitors::SidecarVisitor, SIDECAR_NAME}; +use crate::actions::{get_commit_schema, Sidecar, ADD_NAME}; use crate::expressions::Transform; use crate::log_replay::ActionsBatch; use crate::schema::{Schema, SchemaRef, StructField, ToSchema}; +use crate::utils::require; use crate::{DeltaResult, Engine, Error, Expression, ExpressionEvaluator, FileMeta, RowVisitor}; /// Phase that processes single-part checkpoint manifest files. @@ -17,8 +19,9 @@ use crate::{DeltaResult, Engine, Error, Expression, ExpressionEvaluator, FileMet pub(crate) struct ManifestPhase { actions: Box> + Send>, sidecar_visitor: SidecarVisitor, - original_schema: SchemaRef, + manifest_file: FileMeta, log_root: Url, + is_complete: bool, } /// Possible transitions after ManifestPhase completes. @@ -39,12 +42,18 @@ impl ManifestPhase { /// - `manifest_file`: The checkpoint manifest file to process /// - `log_root`: Root URL for resolving sidecar paths /// - `engine`: Engine for reading files - /// - `base_schema`: Schema columns required by the processor (will be augmented with sidecar) pub fn new( manifest_file: FileMeta, log_root: Url, engine: Arc, ) -> DeltaResult { + #[allow(clippy::unwrap_used)] + static MANIFEST_READ_SCHMEA: LazyLock = LazyLock::new(|| { + get_commit_schema() + .project(&[ADD_NAME, SIDECAR_NAME]) + .unwrap() + }); + let files = vec![manifest_file.clone()]; // Determine file type from extension @@ -59,13 +68,13 @@ impl ManifestPhase { "json" => { engine .json_handler() - .read_json_files(&files, sidecar_schema.clone(), None)? - } - "parquet" => { - engine - .parquet_handler() - .read_parquet_files(&files, sidecar_schema.clone(), None)? + .read_json_files(&files, MANIFEST_READ_SCHMEA.clone(), None)? } + "parquet" => engine.parquet_handler().read_parquet_files( + &files, + MANIFEST_READ_SCHMEA.clone(), + None, + )?, ext => { return Err(Error::generic(format!( "Unsupported checkpoint extension: {}", @@ -80,8 +89,8 @@ impl ManifestPhase { actions: Box::new(actions), sidecar_visitor: SidecarVisitor::default(), log_root, - original_schema, - projector, + manifest_file, + is_complete: false, }) } @@ -91,13 +100,20 @@ impl ManifestPhase { /// - `Sidecars`: Extracted sidecar files /// - `Done`: No sidecars found pub(crate) fn finalize(self) -> DeltaResult { - // TODO: Check that stream is exhausted. We can track a boolean flag on whether we saw a None yet. 
- let sidecars = self + require!( + self.is_complete, + Error::generic(format!( + "Finalized called on ManifestReader for file {:?}", + self.manifest_file.location + )) + ); + + let sidecars: Vec<_> = self .sidecar_visitor .sidecars .into_iter() .map(|s| s.to_filemeta(&self.log_root)) - .collect::, _>>()?; + .try_collect()?; if sidecars.is_empty() { Ok(AfterManifest::Done) @@ -111,18 +127,18 @@ impl Iterator for ManifestPhase { type Item = DeltaResult; fn next(&mut self) -> Option { - self.actions.next().map(|batch_result| { + let result = self.actions.next().map(|batch_result| { batch_result.and_then(|batch| { - // Extract sidecar references from the batch self.sidecar_visitor.visit_rows_of(batch.actions())?; - - // Return the batch - // TODO: un-select sidecar actions - // TODO: project out sidecar actions - let batch = self.projector.evaluate(batch); Ok(batch) }) - }) + }); + + if result.is_none() { + self.is_complete = true; + } + + result } } @@ -187,10 +203,8 @@ mod tests { let checkpoint_file = &log_segment.checkpoint_parts[0]; let manifest_file = checkpoint_file.location.clone(); - let schema = crate::actions::get_commit_schema().project(&[crate::actions::ADD_NAME])?; - let mut manifest_phase = - ManifestPhase::new(manifest_file, log_root.clone(), engine.clone(), schema)?; + ManifestPhase::new(manifest_file, log_root.clone(), engine.clone())?; // Count batches and collect results let mut batch_count = 0; @@ -242,13 +256,13 @@ mod tests { let schema = crate::actions::get_commit_schema().project(&[crate::actions::ADD_NAME])?; let mut manifest_phase = - ManifestPhase::new(manifest_file, log_root.clone(), engine.clone(), schema)?; + ManifestPhase::new(manifest_file, log_root.clone(), engine.clone())?; // Drain the phase while manifest_phase.next().is_some() {} // Check if sidecars were collected - let next = manifest_phase.into_next()?; + let next = manifest_phase.finalize()?; match next { AfterManifest::Sidecars { sidecars } => { diff --git a/kernel/src/scan/mod.rs b/kernel/src/scan/mod.rs index fdefff7b4..4fcb3af41 100644 --- a/kernel/src/scan/mod.rs +++ b/kernel/src/scan/mod.rs @@ -13,7 +13,7 @@ use self::log_replay::get_scan_metadata_transform_expr; use crate::actions::deletion_vector::{ deletion_treemap_to_bools, split_vector, DeletionVectorDescriptor, }; -use crate::actions::{get_commit_schema, ADD_NAME, REMOVE_NAME}; +use crate::actions::{get_commit_schema, ADD_NAME, REMOVE_NAME, SIDECAR_NAME}; use crate::engine_data::FilteredEngineData; use crate::expressions::transforms::ExpressionTransform; use crate::expressions::{ColumnName, ExpressionRef, Predicate, PredicateRef, Scalar}; From e129ce00c9b34146e54364530f993cb61791ad36 Mon Sep 17 00:00:00 2001 From: Oussama Saoudi Date: Tue, 18 Nov 2025 15:50:46 -0800 Subject: [PATCH 5/6] allow_unused --- kernel/src/log_reader/manifest.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/kernel/src/log_reader/manifest.rs b/kernel/src/log_reader/manifest.rs index 9e83472fe..f22984063 100644 --- a/kernel/src/log_reader/manifest.rs +++ b/kernel/src/log_reader/manifest.rs @@ -5,17 +5,17 @@ use std::sync::{Arc, LazyLock}; use itertools::Itertools; use url::Url; -use crate::actions::{get_all_actions_schema, visitors::SidecarVisitor, SIDECAR_NAME}; -use crate::actions::{get_commit_schema, Sidecar, ADD_NAME}; -use crate::expressions::Transform; +use crate::actions::{get_commit_schema, ADD_NAME}; +use crate::actions::{visitors::SidecarVisitor, SIDECAR_NAME}; use crate::log_replay::ActionsBatch; -use 
crate::schema::{Schema, SchemaRef, StructField, ToSchema}; +use crate::schema::SchemaRef; use crate::utils::require; -use crate::{DeltaResult, Engine, Error, Expression, ExpressionEvaluator, FileMeta, RowVisitor}; +use crate::{DeltaResult, Engine, Error, FileMeta, RowVisitor}; /// Phase that processes single-part checkpoint manifest files. /// /// Extracts sidecar references while processing the manifest. +#[allow(unused)] pub(crate) struct ManifestPhase { actions: Box> + Send>, sidecar_visitor: SidecarVisitor, @@ -25,6 +25,7 @@ pub(crate) struct ManifestPhase { } /// Possible transitions after ManifestPhase completes. +#[allow(unused)] pub(crate) enum AfterManifest { /// Has sidecars → return sidecar files Sidecars { sidecars: Vec }, @@ -42,7 +43,8 @@ impl ManifestPhase { /// - `manifest_file`: The checkpoint manifest file to process /// - `log_root`: Root URL for resolving sidecar paths /// - `engine`: Engine for reading files - pub fn new( + #[allow(unused)] + pub(crate) fn new( manifest_file: FileMeta, log_root: Url, engine: Arc, @@ -99,6 +101,7 @@ impl ManifestPhase { /// Returns an enum indicating what comes next: /// - `Sidecars`: Extracted sidecar files /// - `Done`: No sidecars found + #[allow(unused)] pub(crate) fn finalize(self) -> DeltaResult { require!( self.is_complete, From 698da9bbdaa03caf300d9385f2c41b81e57bdceb Mon Sep 17 00:00:00 2001 From: Oussama Saoudi Date: Tue, 18 Nov 2025 16:05:21 -0800 Subject: [PATCH 6/6] improve test --- kernel/src/log_reader/commit.rs | 14 +++---- kernel/src/log_reader/manifest.rs | 69 ++++++++++++------------------- 2 files changed, 33 insertions(+), 50 deletions(-) diff --git a/kernel/src/log_reader/commit.rs b/kernel/src/log_reader/commit.rs index 17ac94bf3..3c0631e9e 100644 --- a/kernel/src/log_reader/commit.rs +++ b/kernel/src/log_reader/commit.rs @@ -55,13 +55,13 @@ mod tests { use crate::scan::COMMIT_READ_SCHEMA; use object_store::local::LocalFileSystem; use std::path::PathBuf; - use std::sync::Arc as StdArc; + use std::sync::Arc; fn load_test_table( table_name: &str, ) -> DeltaResult<( - StdArc>, - StdArc, + Arc>, + Arc, url::Url, )> { let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); @@ -74,8 +74,8 @@ mod tests { let url = url::Url::from_directory_path(path) .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?; - let store = StdArc::new(LocalFileSystem::new()); - let engine = StdArc::new(DefaultEngine::new(store)); + let store = Arc::new(LocalFileSystem::new()); + let engine = Arc::new(DefaultEngine::new(store)); let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?; Ok((engine, snapshot, url)) @@ -84,9 +84,9 @@ mod tests { #[test] fn test_commit_phase_processes_commits() -> DeltaResult<()> { let (engine, snapshot, _url) = load_test_table("table-without-dv-small")?; - let log_segment = StdArc::new(snapshot.log_segment().clone()); + let log_segment = Arc::new(snapshot.log_segment().clone()); - let state_info = StdArc::new(StateInfo::try_new( + let state_info = Arc::new(StateInfo::try_new( snapshot.schema(), snapshot.table_configuration(), None, diff --git a/kernel/src/log_reader/manifest.rs b/kernel/src/log_reader/manifest.rs index f22984063..cfd133780 100644 --- a/kernel/src/log_reader/manifest.rs +++ b/kernel/src/log_reader/manifest.rs @@ -5,10 +5,11 @@ use std::sync::{Arc, LazyLock}; use itertools::Itertools; use url::Url; -use crate::actions::{get_commit_schema, ADD_NAME}; -use crate::actions::{visitors::SidecarVisitor, SIDECAR_NAME}; +use 
crate::actions::visitors::SidecarVisitor; +use crate::actions::SIDECAR_NAME; +use crate::actions::{Add, Sidecar, ADD_NAME}; use crate::log_replay::ActionsBatch; -use crate::schema::SchemaRef; +use crate::schema::{SchemaRef, StructField, StructType, ToSchema}; use crate::utils::require; use crate::{DeltaResult, Engine, Error, FileMeta, RowVisitor}; @@ -49,11 +50,11 @@ impl ManifestPhase { log_root: Url, engine: Arc, ) -> DeltaResult { - #[allow(clippy::unwrap_used)] static MANIFEST_READ_SCHMEA: LazyLock = LazyLock::new(|| { - get_commit_schema() - .project(&[ADD_NAME, SIDECAR_NAME]) - .unwrap() + Arc::new(StructType::new_unchecked([ + StructField::nullable(ADD_NAME, Add::to_schema()), + StructField::nullable(SIDECAR_NAME, Sidecar::to_schema()), + ])) }); let files = vec![manifest_file.clone()]; @@ -148,43 +149,37 @@ impl Iterator for ManifestPhase { #[cfg(test)] mod tests { use super::*; - use crate::engine::default::executor::tokio::TokioBackgroundExecutor; use crate::engine::default::DefaultEngine; use crate::log_replay::LogReplayProcessor; use crate::scan::log_replay::ScanLogReplayProcessor; use crate::scan::state_info::StateInfo; + use crate::SnapshotRef; use object_store::local::LocalFileSystem; - use std::path::PathBuf; - use std::sync::Arc as StdArc; + use std::sync::Arc; + use tempfile::TempDir; fn load_test_table( table_name: &str, - ) -> DeltaResult<( - StdArc>, - StdArc, - url::Url, - )> { - let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - path.push("tests/data"); - path.push(table_name); - - let path = std::fs::canonicalize(path) - .map_err(|e| crate::Error::Generic(format!("Failed to canonicalize path: {}", e)))?; - - let url = url::Url::from_directory_path(path) + ) -> DeltaResult<(Arc, SnapshotRef, Url, TempDir)> { + let test_dir = test_utils::load_test_data("tests/data", table_name) + .map_err(|e| crate::Error::Generic(format!("Failed to load test data: {}", e)))?; + let test_path = test_dir.path().join(table_name); + + let url = url::Url::from_directory_path(&test_path) .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?; - let store = StdArc::new(LocalFileSystem::new()); - let engine = StdArc::new(DefaultEngine::new(store)); + let store = Arc::new(LocalFileSystem::new()); + let engine = Arc::new(DefaultEngine::new(store)); let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?; - Ok((engine, snapshot, url)) + Ok((engine, snapshot, url, test_dir)) } #[test] fn test_manifest_phase_with_checkpoint() -> DeltaResult<()> { // Use a table with v2 checkpoints where adds might be in sidecars - let (engine, snapshot, log_root) = load_test_table("v2-checkpoints-json-with-sidecars")?; + let (engine, snapshot, log_root, _tempdir) = + load_test_table("v2-checkpoints-json-with-sidecars")?; let log_segment = snapshot.log_segment(); // Check if there are any checkpoint parts @@ -193,7 +188,7 @@ mod tests { return Ok(()); } - let state_info = StdArc::new(StateInfo::try_new( + let state_info = Arc::new(StateInfo::try_new( snapshot.schema(), snapshot.table_configuration(), None, @@ -210,10 +205,9 @@ mod tests { ManifestPhase::new(manifest_file, log_root.clone(), engine.clone())?; // Count batches and collect results - let mut batch_count = 0; let mut file_paths = Vec::new(); - while let Some(result) = manifest_phase.next() { + for result in manifest_phase { let batch = result?; let metadata = processor.process_actions_batch(batch)?; let paths = metadata.visit_scan_files( @@ -223,18 +217,8 @@ mod tests { }, )?; 
file_paths.extend(paths); - batch_count += 1; } - - // For v2 checkpoints with sidecars, the manifest might not contain adds directly. - // In this test table, all adds are in sidecars, so manifest should be empty. - assert_eq!( - batch_count, 1, - "Single manifest file should produce exactly 1 batch" - ); - // Verify the manifest itself contains no add files (they're all in sidecars) - file_paths.sort(); assert_eq!( file_paths.len(), 0, "For this v2 checkpoint with sidecars, manifest should contain 0 add files (all in sidecars)" @@ -245,7 +229,8 @@ mod tests { #[test] fn test_manifest_phase_collects_sidecars() -> DeltaResult<()> { - let (engine, snapshot, log_root) = load_test_table("v2-checkpoints-json-with-sidecars")?; + let (engine, snapshot, log_root, _tempdir) = + load_test_table("v2-checkpoints-json-with-sidecars")?; let log_segment = snapshot.log_segment(); if log_segment.checkpoint_parts.is_empty() { @@ -256,8 +241,6 @@ mod tests { let checkpoint_file = &log_segment.checkpoint_parts[0]; let manifest_file = checkpoint_file.location.clone(); - let schema = crate::actions::get_commit_schema().project(&[crate::actions::ADD_NAME])?; - let mut manifest_phase = ManifestPhase::new(manifest_file, log_root.clone(), engine.clone())?;
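Taken together, these patches split log replay into per-source readers: `CommitReader` feeds commit JSON as log batches, `ManifestPhase` feeds the single-part checkpoint manifest while collecting sidecar references, and `finalize` hands back the sidecar files to read next. Below is a rough sketch of how the pieces are meant to compose, based only on the tests above; the driver function itself, its parameters, and the handling of the returned sidecar `FileMeta`s are assumptions, not part of this series.

    use std::sync::Arc;

    use url::Url;

    use crate::log_reader::commit::CommitReader;
    use crate::log_reader::manifest::{AfterManifest, ManifestPhase};
    use crate::log_replay::LogReplayProcessor;
    use crate::log_segment::LogSegment;
    use crate::scan::log_replay::ScanLogReplayProcessor;
    use crate::scan::COMMIT_READ_SCHEMA;
    use crate::{DeltaResult, Engine};

    /// Hypothetical driver (not part of this patch series): chain the readers the
    /// way the tests do, commits first, then the checkpoint manifest, then note
    /// which sidecar files would have to be read next.
    fn replay_sketch(
        engine: Arc<dyn Engine>,
        log_segment: &LogSegment,
        log_root: Url,
        processor: &mut ScanLogReplayProcessor,
    ) -> DeltaResult<()> {
        // Commit files, covered by `find_commit_cover()` in descending version order.
        let commits =
            CommitReader::try_new(engine.as_ref(), log_segment, COMMIT_READ_SCHEMA.clone())?;
        for batch in commits {
            let _metadata = processor.process_actions_batch(batch?)?;
        }

        // A single-part checkpoint manifest, if the segment has one.
        if let Some(checkpoint) = log_segment.checkpoint_parts.first() {
            let mut manifest =
                ManifestPhase::new(checkpoint.location.clone(), log_root, engine.clone())?;
            for batch in &mut manifest {
                let _metadata = processor.process_actions_batch(batch?)?;
            }

            // The manifest must be fully drained before `finalize` will succeed.
            match manifest.finalize()? {
                AfterManifest::Sidecars { sidecars } => {
                    // Reading the sidecar files themselves is not implemented by these
                    // patches; they only surface the `FileMeta`s to read next.
                    let _ = sidecars;
                }
                AfterManifest::Done => {}
            }
        }
        Ok(())
    }

The order matters for replay semantics: `CommitReader` tags its batches with `ActionsBatch::new(batch, true)` (log batches win over checkpoint data), while `ManifestPhase` tags its batches with `false`, matching how `LogSegment::read_actions` already sequences commit and checkpoint streams.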