diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index 0f611cfbc..205fb0e9a 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -93,6 +93,7 @@ pub mod error;
 pub mod expressions;
 mod log_compaction;
 mod log_path;
+mod log_reader;
 pub mod scan;
 pub mod schema;
 pub mod snapshot;
diff --git a/kernel/src/log_reader/commit.rs b/kernel/src/log_reader/commit.rs
new file mode 100644
index 000000000..3c0631e9e
--- /dev/null
+++ b/kernel/src/log_reader/commit.rs
@@ -0,0 +1,133 @@
+//! Commit phase for log replay - processes JSON commit files.
+
+use crate::log_replay::ActionsBatch;
+use crate::log_segment::LogSegment;
+use crate::schema::SchemaRef;
+use crate::{DeltaResult, Engine};
+
+/// Phase that processes JSON commit files (and any log-compaction files that
+/// cover them).
+pub(crate) struct CommitReader {
+    actions: Box<dyn Iterator<Item = DeltaResult<ActionsBatch>> + Send>,
+}
+
+impl CommitReader {
+    /// Create a new commit phase from a log segment.
+    ///
+    /// # Parameters
+    /// - `engine`: Engine for reading files
+    /// - `log_segment`: The log segment to process
+    /// - `schema`: Schema selecting which action columns to read from each commit
+    pub(crate) fn try_new(
+        engine: &dyn Engine,
+        log_segment: &LogSegment,
+        schema: SchemaRef,
+    ) -> DeltaResult<Self> {
+        let commit_files = log_segment.find_commit_cover();
+        let actions = engine
+            .json_handler()
+            .read_json_files(&commit_files, schema, None)?
+            .map(|batch| batch.map(|b| ActionsBatch::new(b, true)));
+
+        Ok(Self {
+            actions: Box::new(actions),
+        })
+    }
+}
+
+impl Iterator for CommitReader {
+    type Item = DeltaResult<ActionsBatch>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.actions.next()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::engine::default::executor::tokio::TokioBackgroundExecutor;
+    use crate::engine::default::DefaultEngine;
+    use crate::log_replay::LogReplayProcessor;
+    use crate::scan::log_replay::ScanLogReplayProcessor;
+    use crate::scan::state_info::StateInfo;
+    use crate::scan::COMMIT_READ_SCHEMA;
+    use object_store::local::LocalFileSystem;
+    use std::path::PathBuf;
+    use std::sync::Arc;
+
+    fn load_test_table(
+        table_name: &str,
+    ) -> DeltaResult<(
+        Arc<DefaultEngine<TokioBackgroundExecutor>>,
+        Arc<crate::Snapshot>,
+        url::Url,
+    )> {
+        let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+        path.push("tests/data");
+        path.push(table_name);
+
+        let path = std::fs::canonicalize(path)
+            .map_err(|e| crate::Error::Generic(format!("Failed to canonicalize path: {}", e)))?;
+
+        let url = url::Url::from_directory_path(path)
+            .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?;
+
+        let store = Arc::new(LocalFileSystem::new());
+        let engine = Arc::new(DefaultEngine::new(store));
+        let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?;
+
+        Ok((engine, snapshot, url))
+    }
+
+    #[test]
+    fn test_commit_phase_processes_commits() -> DeltaResult<()> {
+        let (engine, snapshot, _url) = load_test_table("table-without-dv-small")?;
+        let log_segment = Arc::new(snapshot.log_segment().clone());
+
+        let state_info = Arc::new(StateInfo::try_new(
+            snapshot.schema(),
+            snapshot.table_configuration(),
+            None,
+            (),
+        )?);
+
+        let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?;
+        let schema = COMMIT_READ_SCHEMA.clone();
+        let commit_phase = CommitReader::try_new(engine.as_ref(), &log_segment, schema)?;
+
+        let mut batch_count = 0;
+        let mut file_paths = Vec::new();
+
+        for result in commit_phase {
+            let batch = result?;
+            let metadata = processor.process_actions_batch(batch)?;
+            let paths = metadata.visit_scan_files(
+                vec![],
+                |ps: &mut Vec<String>, path, _, _, _, _, _| {
+                    ps.push(path.to_string());
+                },
+            )?;
+            file_paths.extend(paths);
+            batch_count += 1;
+        }
+
+        // table-without-dv-small has exactly 1 commit file
+        assert_eq!(
+            batch_count, 1,
+            "table-without-dv-small should have exactly 1 commit batch"
+        );
+
+        // table-without-dv-small has exactly 1 add file
+        file_paths.sort();
+        let expected_files =
+            vec!["part-00000-517f5d32-9c95-48e8-82b4-0229cc194867-c000.snappy.parquet"];
+        assert_eq!(
+            file_paths, expected_files,
+            "CommitReader should find exactly the expected file"
+        );
+
+        Ok(())
+    }
+}
diff --git a/kernel/src/log_reader/manifest.rs b/kernel/src/log_reader/manifest.rs
new file mode 100644
index 000000000..cfd133780
--- /dev/null
+++ b/kernel/src/log_reader/manifest.rs
@@ -0,0 +1,289 @@
+//! Manifest phase for log replay - processes single-part checkpoint manifest files.
+
+use std::sync::{Arc, LazyLock};
+
+use itertools::Itertools;
+use url::Url;
+
+use crate::actions::visitors::SidecarVisitor;
+use crate::actions::{Add, Sidecar, ADD_NAME, SIDECAR_NAME};
+use crate::log_replay::ActionsBatch;
+use crate::schema::{SchemaRef, StructField, StructType, ToSchema};
+use crate::utils::require;
+use crate::{DeltaResult, Engine, Error, FileMeta, RowVisitor};
+
+/// Phase that processes single-part checkpoint manifest files.
+///
+/// Extracts sidecar references while processing the manifest.
+#[allow(unused)]
+pub(crate) struct ManifestPhase {
+    actions: Box<dyn Iterator<Item = DeltaResult<ActionsBatch>> + Send>,
+    sidecar_visitor: SidecarVisitor,
+    manifest_file: FileMeta,
+    log_root: Url,
+    is_complete: bool,
+}
+
+/// Possible transitions after ManifestPhase completes.
+#[allow(unused)]
+pub(crate) enum AfterManifest {
+    /// The manifest referenced sidecars → read these files next
+    Sidecars { sidecars: Vec<FileMeta> },
+    /// No sidecars
+    Done,
+}
+
+impl ManifestPhase {
+    /// Create a new manifest phase for a single-part checkpoint.
+    ///
+    /// The read schema is fixed to the `add` and `sidecar` columns: the sidecar
+    /// column is required so this phase can extract sidecar references for
+    /// phase transitions.
+    ///
+    /// # Parameters
+    /// - `manifest_file`: The checkpoint manifest file to process
+    /// - `log_root`: Root URL for resolving sidecar paths
+    /// - `engine`: Engine for reading files
+    #[allow(unused)]
+    pub(crate) fn new(
+        manifest_file: FileMeta,
+        log_root: Url,
+        engine: Arc<dyn Engine>,
+    ) -> DeltaResult<Self> {
+        static MANIFEST_READ_SCHEMA: LazyLock<SchemaRef> = LazyLock::new(|| {
+            Arc::new(StructType::new_unchecked([
+                StructField::nullable(ADD_NAME, Add::to_schema()),
+                StructField::nullable(SIDECAR_NAME, Sidecar::to_schema()),
+            ]))
+        });
+
+        let files = vec![manifest_file.clone()];
+
+        // Determine file type from extension
+        let extension = manifest_file
+            .location
+            .path()
+            .rsplit('.')
+            .next()
+            .unwrap_or("");
+
+        let actions = match extension {
+            "json" => engine
+                .json_handler()
+                .read_json_files(&files, MANIFEST_READ_SCHEMA.clone(), None)?,
+            "parquet" => engine.parquet_handler().read_parquet_files(
+                &files,
+                MANIFEST_READ_SCHEMA.clone(),
+                None,
+            )?,
+            ext => {
+                return Err(Error::generic(format!(
+                    "Unsupported checkpoint extension: {}",
+                    ext
+                )))
+            }
+        };
+
+        let actions = actions.map(|batch| batch.map(|b| ActionsBatch::new(b, false)));
+
+        Ok(Self {
+            actions: Box::new(actions),
+            sidecar_visitor: SidecarVisitor::default(),
+            log_root,
+            manifest_file,
+            is_complete: false,
+        })
+    }
+
+    /// Transition to the next phase.
+    ///
+    /// Returns an enum indicating what comes next:
+    /// - `Sidecars`: Extracted sidecar files
+    /// - `Done`: No sidecars found
+    #[allow(unused)]
+    pub(crate) fn finalize(self) -> DeltaResult<AfterManifest> {
+        require!(
+            self.is_complete,
+            Error::generic(format!(
+                "finalize called on ManifestPhase before it was fully drained (file {:?})",
+                self.manifest_file.location
+            ))
+        );
+
+        let sidecars: Vec<_> = self
+            .sidecar_visitor
+            .sidecars
+            .into_iter()
+            .map(|s| s.to_filemeta(&self.log_root))
+            .try_collect()?;
+
+        if sidecars.is_empty() {
+            Ok(AfterManifest::Done)
+        } else {
+            Ok(AfterManifest::Sidecars { sidecars })
+        }
+    }
+}
+
+impl Iterator for ManifestPhase {
+    type Item = DeltaResult<ActionsBatch>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let result = self.actions.next().map(|batch_result| {
+            batch_result.and_then(|batch| {
+                self.sidecar_visitor.visit_rows_of(batch.actions())?;
+                Ok(batch)
+            })
+        });
+
+        if result.is_none() {
+            self.is_complete = true;
+        }
+
+        result
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::engine::default::executor::tokio::TokioBackgroundExecutor;
+    use crate::engine::default::DefaultEngine;
+    use crate::log_replay::LogReplayProcessor;
+    use crate::scan::log_replay::ScanLogReplayProcessor;
+    use crate::scan::state_info::StateInfo;
+    use crate::SnapshotRef;
+    use object_store::local::LocalFileSystem;
+    use std::sync::Arc;
+    use tempfile::TempDir;
+
+    fn load_test_table(
+        table_name: &str,
+    ) -> DeltaResult<(
+        Arc<DefaultEngine<TokioBackgroundExecutor>>,
+        SnapshotRef,
+        Url,
+        TempDir,
+    )> {
+        let test_dir = test_utils::load_test_data("tests/data", table_name)
+            .map_err(|e| crate::Error::Generic(format!("Failed to load test data: {}", e)))?;
+        let test_path = test_dir.path().join(table_name);
+
+        let url = url::Url::from_directory_path(&test_path)
+            .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?;
+
+        let store = Arc::new(LocalFileSystem::new());
+        let engine = Arc::new(DefaultEngine::new(store));
+        let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?;
+
+        Ok((engine, snapshot, url, test_dir))
+    }
+
+    #[test]
+    fn test_manifest_phase_with_checkpoint() -> DeltaResult<()> {
+        // Use a table with v2 checkpoints where adds might be in sidecars
+        let (engine, snapshot, log_root, _tempdir) =
+            load_test_table("v2-checkpoints-json-with-sidecars")?;
+        let log_segment = snapshot.log_segment();
+
+        // Check if there are any checkpoint parts
+        if log_segment.checkpoint_parts.is_empty() {
+            println!("Test table has no checkpoint parts, skipping");
+            return Ok(());
+        }
+
+        let state_info = Arc::new(StateInfo::try_new(
+            snapshot.schema(),
+            snapshot.table_configuration(),
+            None,
+            (),
+        )?);
+
+        let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?;
+
+        // Get the first checkpoint part
+        let checkpoint_file = &log_segment.checkpoint_parts[0];
+        let manifest_file = checkpoint_file.location.clone();
+
+        let manifest_phase =
+            ManifestPhase::new(manifest_file, log_root.clone(), engine.clone())?;
+
+        // Collect the scan-file paths surfaced by the manifest itself
+        let mut file_paths = Vec::new();
+
+        for result in manifest_phase {
+            let batch = result?;
+            let metadata = processor.process_actions_batch(batch)?;
+            let paths = metadata.visit_scan_files(
+                vec![],
+                |ps: &mut Vec<String>, path, _, _, _, _, _| {
+                    ps.push(path.to_string());
+                },
+            )?;
+            file_paths.extend(paths);
+        }
+
+        // Verify the manifest itself contains no add files (they're all in sidecars)
+        assert_eq!(
+            file_paths.len(),
+            0,
+            "For this v2 checkpoint with sidecars, manifest should contain 0 add files (all in sidecars)"
+        );
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_manifest_phase_collects_sidecars() -> DeltaResult<()> {
+        let (engine, snapshot, log_root, _tempdir) =
+            load_test_table("v2-checkpoints-json-with-sidecars")?;
+        let log_segment = snapshot.log_segment();
+
+        if log_segment.checkpoint_parts.is_empty() {
+            println!("Test table has no checkpoint parts, skipping");
+            return Ok(());
+        }
+
+        let checkpoint_file = &log_segment.checkpoint_parts[0];
+        let manifest_file = checkpoint_file.location.clone();
+
+        let mut manifest_phase =
+            ManifestPhase::new(manifest_file, log_root.clone(), engine.clone())?;
+
+        // Drain the phase
+        while manifest_phase.next().is_some() {}
+
+        // Check if sidecars were collected
+        let next = manifest_phase.finalize()?;
+
+        match next {
+            AfterManifest::Sidecars { sidecars } => {
+                // For the v2-checkpoints-json-with-sidecars test table at version 6,
+                // there are exactly 2 sidecar files
+                assert_eq!(
+                    sidecars.len(),
+                    2,
+                    "Should collect exactly 2 sidecars for checkpoint at version 6"
+                );
+
+                // Extract and verify the sidecar paths
+                let mut collected_paths: Vec<String> = sidecars
+                    .iter()
+                    .map(|fm| {
+                        // Get the filename from the URL path
+                        fm.location
+                            .path_segments()
+                            .and_then(|segments| segments.last())
+                            .unwrap_or("")
+                            .to_string()
+                    })
+                    .collect();
+
+                collected_paths.sort();
+
+                // Verify they're the expected sidecar files for version 6
+                assert_eq!(collected_paths[0], "00000000000000000006.checkpoint.0000000001.0000000002.19af1366-a425-47f4-8fa6-8d6865625573.parquet");
+                assert_eq!(collected_paths[1], "00000000000000000006.checkpoint.0000000002.0000000002.5008b69f-aa8a-4a66-9299-0733a56a7e63.parquet");
+            }
+            AfterManifest::Done => {
+                panic!("Expected sidecars for v2-checkpoints-json-with-sidecars table");
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/kernel/src/log_reader/mod.rs b/kernel/src/log_reader/mod.rs
new file mode 100644
index 000000000..1ae42a4ba
--- /dev/null
+++ b/kernel/src/log_reader/mod.rs
@@ -0,0 +1,2 @@
+pub(crate) mod commit;
+pub(crate) mod manifest;
diff --git a/kernel/src/log_segment.rs b/kernel/src/log_segment.rs
index 4eb5e1ee7..e6ef5fd41 100644
--- a/kernel/src/log_segment.rs
+++ b/kernel/src/log_segment.rs
@@ -9,6 +9,7 @@ use crate::actions::{
     PROTOCOL_NAME, SIDECAR_NAME,
 };
 use crate::last_checkpoint_hint::LastCheckpointHint;
+use crate::log_reader::commit::CommitReader;
 use crate::log_replay::ActionsBatch;
 use crate::path::{LogPathFileType, ParsedLogPath};
 use crate::schema::{SchemaRef, StructField, ToSchema as _};
@@ -303,15 +304,7 @@ impl LogSegment {
         meta_predicate: Option<PredicateRef>,
     ) -> DeltaResult<impl Iterator<Item = DeltaResult<ActionsBatch>> + Send> {
         // `replay` expects commit files to be sorted in descending order, so the return value here is correct
-        let commits_and_compactions = self.find_commit_cover();
-        let commit_stream = engine
-            .json_handler()
-            .read_json_files(
-                &commits_and_compactions,
-                commit_read_schema,
-                meta_predicate.clone(),
-            )?
-            .map_ok(|batch| ActionsBatch::new(batch, true));
+        let commit_stream = CommitReader::try_new(engine, self, commit_read_schema)?;
 
         let checkpoint_stream =
             self.create_checkpoint_stream(engine, checkpoint_read_schema, meta_predicate)?;
@@ -340,7 +333,7 @@ impl LogSegment {
     /// returns files in DESCENDING ORDER, as that's what `replay` expects. This function assumes
     /// that all files in `self.ascending_commit_files` and `self.ascending_compaction_files` are in
     /// range for this log segment. This invariant is maintained by our listing code.
-    fn find_commit_cover(&self) -> Vec<FileMeta> {
+    pub(crate) fn find_commit_cover(&self) -> Vec<FileMeta> {
         // Create an iterator sorted in ascending order by (initial version, end version), e.g.
         // [00.json, 00.09.compacted.json, 00.99.compacted.json, 01.json, 02.json, ..., 10.json,
         // 10.19.compacted.json, 11.json, ...]
diff --git a/kernel/src/scan/log_replay.rs b/kernel/src/scan/log_replay.rs
index 7bf69eb72..e0fe51b3f 100644
--- a/kernel/src/scan/log_replay.rs
+++ b/kernel/src/scan/log_replay.rs
@@ -53,7 +53,7 @@ pub(crate) struct ScanLogReplayProcessor {
 
 impl ScanLogReplayProcessor {
     /// Create a new [`ScanLogReplayProcessor`] instance
-    fn new(engine: &dyn Engine, state_info: Arc<StateInfo>) -> DeltaResult<Self> {
+    pub(crate) fn new(engine: &dyn Engine, state_info: Arc<StateInfo>) -> DeltaResult<Self> {
         // Extract the physical predicate from StateInfo's PhysicalPredicate enum.
         // The DataSkippingFilter and partition_filter components expect the predicate
         // in the format Option<(PredicateRef, SchemaRef)>, so we need to convert from
diff --git a/kernel/src/scan/mod.rs b/kernel/src/scan/mod.rs
index c571c346f..4fcb3af41 100644
--- a/kernel/src/scan/mod.rs
+++ b/kernel/src/scan/mod.rs
@@ -13,7 +13,7 @@ use self::log_replay::get_scan_metadata_transform_expr;
 use crate::actions::deletion_vector::{
     deletion_treemap_to_bools, split_vector, DeletionVectorDescriptor,
 };
-use crate::actions::{get_commit_schema, ADD_NAME, REMOVE_NAME};
+use crate::actions::{get_commit_schema, ADD_NAME, REMOVE_NAME, SIDECAR_NAME};
 use crate::engine_data::FilteredEngineData;
 use crate::expressions::transforms::ExpressionTransform;
 use crate::expressions::{ColumnName, ExpressionRef, Predicate, PredicateRef, Scalar};
@@ -41,7 +41,7 @@ pub(crate) mod state_info;
 
 // safety: we define get_commit_schema() and _know_ it contains ADD_NAME and REMOVE_NAME
 #[allow(clippy::unwrap_used)]
-static COMMIT_READ_SCHEMA: LazyLock<SchemaRef> = LazyLock::new(|| {
+pub(crate) static COMMIT_READ_SCHEMA: LazyLock<SchemaRef> = LazyLock::new(|| {
     get_commit_schema()
         .project(&[ADD_NAME, REMOVE_NAME])
         .unwrap()
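
For context, a minimal sketch (not part of the diff) of how these phases are intended to chain. It assumes crate-internal access; `replay_with_phases` is a hypothetical helper, the processor hookup is elided, and only `CommitReader`, `ManifestPhase`, and `AfterManifest` come from this change:

```rust
// Hypothetical crate-internal driver; mirrors how LogSegment::replay composes
// its commit and checkpoint streams, but drives the new phase types directly.
use std::sync::Arc;
use url::Url;

use crate::log_reader::commit::CommitReader;
use crate::log_reader::manifest::{AfterManifest, ManifestPhase};
use crate::log_segment::LogSegment;
use crate::schema::SchemaRef;
use crate::{DeltaResult, Engine};

fn replay_with_phases(
    engine: Arc<dyn Engine>,
    log_segment: &LogSegment,
    log_root: Url, // the table's _delta_log URL, used to resolve sidecar paths
    commit_schema: SchemaRef,
) -> DeltaResult<()> {
    // Phase 1: commit (and compaction) files, newest first; each batch is
    // flagged `true` (log batch), as in CommitReader::try_new.
    for batch in CommitReader::try_new(engine.as_ref(), log_segment, commit_schema)? {
        let _batch = batch?; // feed into a LogReplayProcessor here
    }

    // Phase 2: the single-part checkpoint manifest, if this segment has one.
    if let Some(part) = log_segment.checkpoint_parts.first() {
        let mut manifest = ManifestPhase::new(part.location.clone(), log_root, engine)?;
        // Drain before finalize(): is_complete flips only once the underlying
        // iterator is exhausted, and finalize() errors otherwise.
        while let Some(batch) = manifest.next() {
            let _batch = batch?; // feed into the same processor
        }
        // Phase 3 is data-dependent: sidecar files, or nothing.
        match manifest.finalize()? {
            AfterManifest::Sidecars { sidecars } => {
                // Reading sidecars is not part of this diff; a follow-up phase
                // would scan these parquet files for the checkpoint's adds.
                let _ = sidecars;
            }
            AfterManifest::Done => {}
        }
    }
    Ok(())
}
```

One design note: `finalize(self)` taking the phase by value is what enforces the ordering at compile time, since a `ManifestPhase` can no longer be iterated once it has been asked for its successor.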