diff --git a/kernel/src/distributed/driver.rs b/kernel/src/distributed/driver.rs
new file mode 100644
index 000000000..efb7f4ea8
--- /dev/null
+++ b/kernel/src/distributed/driver.rs
@@ -0,0 +1,423 @@
+//! Driver (Phase 1) log replay composition for distributed execution.
+//!
+//! This module provides driver-side execution that processes commit files and the
+//! checkpoint manifest, then returns the processor plus any files that still need
+//! to be distributed to executors.
+//!
+//! Supports streaming operations via `LogReplayProcessor`.
+
+use std::sync::Arc;
+
+use crate::actions::get_commit_schema;
+use crate::log_reader::commit::CommitReader;
+use crate::log_reader::manifest::{AfterManifest, ManifestPhase};
+use crate::log_replay::LogReplayProcessor;
+use crate::log_segment::LogSegment;
+use crate::{DeltaResult, Engine, Error, FileMeta};
+
+/// Driver-side log replay (Phase 1) for distributed execution.
+///
+/// This iterator processes:
+/// 1. Commit files (JSON)
+/// 2. Manifest (single-part checkpoint, if present)
+///
+/// After exhaustion, call `finish()` to extract:
+/// - The processor (for serialization and distribution)
+/// - Files to distribute (sidecars or multi-part checkpoint parts)
+///
+/// # Example
+///
+/// ```ignore
+/// let mut driver = DriverPhase::try_new(processor, log_segment, engine)?;
+///
+/// // Iterate over driver-side batches (`by_ref` so `driver` can be finished below)
+/// for batch in driver.by_ref() {
+///     let metadata = batch?;
+///     // Process metadata
+/// }
+///
+/// // Extract processor and files for distribution (if needed)
+/// match driver.finish()? {
+///     DriverPhaseResult::NeedsExecutorPhase { processor, files } => {
+///         // Executor phase needed - distribute files
+///         let serialized = processor.serialize()?;
+///         let partitions = partition_files(files, num_executors);
+///         for (executor, partition) in partitions {
+///             executor.send(serialized.clone(), partition)?;
+///         }
+///     }
+///     DriverPhaseResult::Complete(_processor) => {
+///         // No executor phase needed - all processing complete
+///         println!("Log replay complete on driver");
+///     }
+/// }
+/// ```
+pub(crate) struct DriverPhase<P: LogReplayProcessor> {
+    processor: P,
+    state: Option<DriverState>,
+    /// Pre-computed next state after commit for concurrent IO
+    next_state_after_commit: Option<DriverState>,
+    /// Whether the iterator has been fully exhausted
+    is_finished: bool,
+}
+
+enum DriverState {
+ Commit(CommitReader),
+ Manifest(ManifestPhase),
+ /// Executor phase needed - has files to distribute
+ ExecutorPhase {
+        files: Vec<FileMeta>,
+ },
+ /// Done - no more work needed
+ Done,
+}
+
+/// Result of driver phase processing.
+pub(crate) enum DriverPhaseResult<P> {
+ /// All processing complete on driver - no executor phase needed.
+ Complete(P),
+ /// Executor phase needed - distribute files to executors for parallel processing.
+    NeedsExecutorPhase { processor: P, files: Vec<FileMeta> },
+}
+
+impl<P: LogReplayProcessor> DriverPhase<P> {
+ /// Create a new driver-side log replay.
+ ///
+ /// Works for streaming operations via `LogReplayProcessor`.
+ ///
+ /// # Parameters
+ /// - `processor`: The log replay processor
+ /// - `log_segment`: The log segment to process
+ /// - `engine`: Engine for reading files
+ pub(crate) fn try_new(
+ processor: P,
+        log_segment: Arc<LogSegment>,
+        engine: Arc<dyn Engine>,
+    ) -> DeltaResult<Self> {
+ let commit_schema = get_commit_schema();
+ let commit = CommitReader::try_new(engine.as_ref(), &log_segment, commit_schema.clone())?;
+
+        // Eagerly compute the state after the commit phase so that any checkpoint
+        // manifest IO is initiated concurrently with commit processing.
+        let next_state_after_commit =
+            Some(Self::compute_state_after_commit(&log_segment, engine.clone())?);
+
+ Ok(Self {
+ processor,
+ state: Some(DriverState::Commit(commit)),
+ next_state_after_commit,
+ is_finished: false,
+ })
+ }
+
+ /// Compute the next state after CommitReader is exhausted.
+ ///
+ /// This is called during construction to enable concurrent IO initialization.
+ /// Returns the appropriate DriverState based on checkpoint configuration:
+ /// - Single-part checkpoint → Manifest phase (pre-initialized)
+ /// - Multi-part checkpoint → ExecutorPhase with all parts
+ /// - No checkpoint → Done
+ fn compute_state_after_commit(
+ log_segment: &LogSegment,
+        engine: Arc<dyn Engine>,
+    ) -> DeltaResult<DriverState> {
+ if log_segment.checkpoint_parts.is_empty() {
+ // No checkpoint
+ Ok(DriverState::Done)
+ } else if log_segment.checkpoint_parts.len() == 1 {
+ // Single-part checkpoint: create manifest phase
+ let checkpoint_part = &log_segment.checkpoint_parts[0];
+ let manifest = ManifestPhase::new(
+ checkpoint_part.location.clone(),
+ log_segment.log_root.clone(),
+ engine,
+ )?;
+ Ok(DriverState::Manifest(manifest))
+ } else {
+ // Multi-part checkpoint: all parts are leaf files
+ let files: Vec<_> = log_segment
+ .checkpoint_parts
+ .iter()
+ .map(|p| p.location.clone())
+ .collect();
+ Ok(DriverState::ExecutorPhase { files })
+ }
+ }
+}
+
+impl<P: LogReplayProcessor> Iterator for DriverPhase<P> {
+    type Item = DeltaResult<P::Output>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ // Try to get item from current phase
+ let batch_result = match self.state.as_mut()? {
+ DriverState::Commit(phase) => phase.next(),
+ DriverState::Manifest(phase) => phase.next(),
+ DriverState::ExecutorPhase { .. } | DriverState::Done => {
+ self.is_finished = true;
+ return None;
+ }
+ };
+
+ match batch_result {
+ Some(Ok(batch)) => {
+ // Process the batch through the processor
+ return Some(self.processor.process_actions_batch(batch));
+ }
+ Some(Err(e)) => return Some(Err(e)),
+ None => {
+ // Phase exhausted - transition
+ let old_state = self.state.take()?;
+ match self.transition(old_state) {
+ Ok(new_state) => self.state = Some(new_state),
+ Err(e) => return Some(Err(e)),
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<P: LogReplayProcessor> DriverPhase<P> {
+    fn transition(&mut self, state: DriverState) -> DeltaResult<DriverState> {
+ match state {
+ DriverState::Commit(_) => {
+ // Use pre-computed state (always initialized in constructor)
+ self.next_state_after_commit.take().ok_or_else(|| {
+ Error::generic("next_state_after_commit should be initialized in constructor")
+ })
+ }
+
+ DriverState::Manifest(manifest) => {
+ // After ManifestPhase exhausted, check for sidecars
+ match manifest.finalize()? {
+ AfterManifest::Sidecars { sidecars } => {
+ Ok(DriverState::ExecutorPhase { files: sidecars })
+ }
+ AfterManifest::Done => Ok(DriverState::Done),
+ }
+ }
+
+ // These states are terminal and should never be transitioned from
+ DriverState::ExecutorPhase { .. } | DriverState::Done => {
+ Err(Error::generic("Invalid state transition: terminal state reached"))
+ }
+ }
+ }
+}
+
+// ============================================================================
+// Finalization: extract the processor and any files left to distribute
+// ============================================================================
+
+impl<P: LogReplayProcessor> DriverPhase<P> {
+ /// Complete driver phase and extract processor + files for distribution.
+ ///
+ /// Must be called after the iterator is exhausted.
+ ///
+ /// # Returns
+ /// - `Complete`: All processing done on driver - no executor phase needed
+ /// - `NeedsExecutorPhase`: Executor phase needed - distribute files to executors
+ ///
+ /// # Errors
+ /// Returns an error if called before iterator exhaustion.
+    pub(crate) fn finish(self) -> DeltaResult<DriverPhaseResult<P>> {
+ if !self.is_finished {
+ return Err(Error::generic(
+ "Must exhaust iterator before calling finish()",
+ ));
+ }
+
+ match self.state {
+ Some(DriverState::ExecutorPhase { files }) => {
+ Ok(DriverPhaseResult::NeedsExecutorPhase {
+ processor: self.processor,
+ files,
+ })
+ }
+ Some(DriverState::Done) | None => Ok(DriverPhaseResult::Complete(self.processor)),
+ _ => Err(Error::generic("Unexpected state after iterator exhaustion")),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::engine::default::executor::tokio::TokioBackgroundExecutor;
+ use crate::engine::default::DefaultEngine;
+ use crate::scan::log_replay::ScanLogReplayProcessor;
+ use crate::scan::state_info::StateInfo;
+ use object_store::local::LocalFileSystem;
+ use std::path::PathBuf;
+ use std::sync::Arc as StdArc;
+
+ fn load_test_table(
+ table_name: &str,
+ ) -> DeltaResult<(
+        StdArc<DefaultEngine<TokioBackgroundExecutor>>,
+        StdArc<crate::Snapshot>,
+ url::Url,
+ )> {
+ let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ path.push("tests/data");
+ path.push(table_name);
+
+ let path = std::fs::canonicalize(path)
+ .map_err(|e| crate::Error::Generic(format!("Failed to canonicalize path: {}", e)))?;
+
+ let url = url::Url::from_directory_path(path)
+ .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?;
+
+ let store = StdArc::new(LocalFileSystem::new());
+ let engine = StdArc::new(DefaultEngine::new(store));
+ let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?;
+
+ Ok((engine, snapshot, url))
+ }
+
+ #[test]
+ fn test_driver_v2_with_commits_only() -> DeltaResult<()> {
+ let (engine, snapshot, _url) = load_test_table("table-without-dv-small")?;
+ let log_segment = StdArc::new(snapshot.log_segment().clone());
+
+ let state_info = StdArc::new(StateInfo::try_new(
+ snapshot.schema(),
+ snapshot.table_configuration(),
+ None,
+ (),
+ )?);
+
+ let processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?;
+ let mut driver = DriverPhase::try_new(processor, log_segment, engine.clone())?;
+
+ let mut batch_count = 0;
+ let mut file_paths = Vec::new();
+
+ while let Some(result) = driver.next() {
+ let metadata = result?;
+ let paths = metadata.visit_scan_files(
+ vec![],
+                |ps: &mut Vec<String>, path, _, _, _, _, _| {
+ ps.push(path.to_string());
+ },
+ )?;
+ file_paths.extend(paths);
+ batch_count += 1;
+ }
+
+ // table-without-dv-small has exactly 1 commit
+ assert_eq!(
+ batch_count, 1,
+ "DriverPhase should process exactly 1 batch for table-without-dv-small"
+ );
+
+ file_paths.sort();
+ let expected_files =
+ vec!["part-00000-517f5d32-9c95-48e8-82b4-0229cc194867-c000.snappy.parquet"];
+ assert_eq!(
+ file_paths, expected_files,
+ "DriverPhase should find exactly the expected file"
+ );
+
+ // No executor phase needed for commits-only table
+ let result = driver.finish()?;
+ match result {
+ DriverPhaseResult::Complete(_processor) => {
+ // Expected - no executor phase needed
+ }
+ DriverPhaseResult::NeedsExecutorPhase { .. } => {
+ panic!("Expected Complete, but got NeedsExecutorPhase for commits-only table");
+ }
+ }
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_driver_v2_with_sidecars() -> DeltaResult<()> {
+ let (engine, snapshot, _url) = load_test_table("v2-checkpoints-json-with-sidecars")?;
+ let log_segment = StdArc::new(snapshot.log_segment().clone());
+
+ let state_info = StdArc::new(StateInfo::try_new(
+ snapshot.schema(),
+ snapshot.table_configuration(),
+ None,
+ (),
+ )?);
+
+ let processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?;
+ let mut driver = DriverPhase::try_new(processor, log_segment, engine.clone())?;
+
+ let mut driver_batch_count = 0;
+ let mut driver_file_paths = Vec::new();
+
+ while let Some(result) = driver.next() {
+ let metadata = result?;
+ let paths = metadata.visit_scan_files(
+ vec![],
+                |ps: &mut Vec<String>, path, _, _, _, _, _| {
+ ps.push(path.to_string());
+ },
+ )?;
+ driver_file_paths.extend(paths);
+ driver_batch_count += 1;
+ }
+
+ // Driver processes commits after checkpoint (v7-v12) in batches, then manifest
+ // For v2-checkpoints-json-with-sidecars: checkpoint at v6, commits 7-12 exist
+ // The commits 7-12 contain no new add actions (only removes/metadata updates)
+ // So driver produces batches from commits, but those batches contain 0 files
+ // Note: A single batch may contain multiple commits
+ assert!(
+ driver_batch_count >= 1,
+ "DriverPhase should process at least 1 batch"
+ );
+
+ // The driver should process 0 files (all adds are in the checkpoint sidecars, commits after checkpoint have no new adds)
+ driver_file_paths.sort();
+ assert_eq!(
+ driver_file_paths.len(), 0,
+ "DriverPhase should find 0 files (all adds are in checkpoint sidecars, commits 7-12 have no new add actions)"
+ );
+
+ // Should have executor phase with sidecars from the checkpoint
+ let result = driver.finish()?;
+ match result {
+ DriverPhaseResult::NeedsExecutorPhase {
+ processor: _processor,
+ files,
+ } => {
+ assert_eq!(
+ files.len(),
+ 2,
+ "DriverPhase should collect exactly 2 sidecar files from checkpoint for distribution"
+ );
+
+ // Extract and verify the sidecar file paths
+                let mut collected_paths: Vec<String> = files
+ .iter()
+ .map(|fm| {
+ // Get the filename from the URL path
+ fm.location
+ .path_segments()
+ .and_then(|segments| segments.last())
+ .unwrap_or("")
+ .to_string()
+ })
+ .collect();
+
+ collected_paths.sort();
+
+ // Verify they're the expected sidecar files for version 6
+ assert_eq!(collected_paths[0], "00000000000000000006.checkpoint.0000000001.0000000002.19af1366-a425-47f4-8fa6-8d6865625573.parquet");
+ assert_eq!(collected_paths[1], "00000000000000000006.checkpoint.0000000002.0000000002.5008b69f-aa8a-4a66-9299-0733a56a7e63.parquet");
+ }
+ DriverPhaseResult::Complete(_processor) => {
+ panic!("Expected NeedsExecutorPhase for table with sidecars");
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/kernel/src/distributed/mod.rs b/kernel/src/distributed/mod.rs
new file mode 100644
index 000000000..75a481d1f
--- /dev/null
+++ b/kernel/src/distributed/mod.rs
@@ -0,0 +1 @@
+pub(crate) mod driver;
\ No newline at end of file
diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index 0f611cfbc..539543afb 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -88,11 +88,13 @@ mod action_reconciliation;
pub mod actions;
pub mod checkpoint;
pub mod committer;
+mod distributed;
pub mod engine_data;
pub mod error;
pub mod expressions;
mod log_compaction;
mod log_path;
+mod log_reader;
pub mod scan;
pub mod schema;
pub mod snapshot;
diff --git a/kernel/src/log_reader/commit.rs b/kernel/src/log_reader/commit.rs
new file mode 100644
index 000000000..17ac94bf3
--- /dev/null
+++ b/kernel/src/log_reader/commit.rs
@@ -0,0 +1,133 @@
+//! Commit phase for log replay - processes JSON commit files.
+
+use crate::log_replay::ActionsBatch;
+use crate::log_segment::LogSegment;
+use crate::schema::SchemaRef;
+use crate::{DeltaResult, Engine};
+
+/// Phase that processes JSON commit files.
+pub(crate) struct CommitReader {
+    actions: Box<dyn Iterator<Item = DeltaResult<ActionsBatch>> + Send>,
+}
+
+impl CommitReader {
+ /// Create a new commit phase from a log segment.
+ ///
+    /// # Parameters
+    /// - `engine`: Engine for reading files
+    /// - `log_segment`: The log segment to process
+    /// - `schema`: Schema with which to read the commit files
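+    ///
+    /// # Example
+    ///
+    /// A minimal sketch mirroring the unit test below; assumes `processor` is some
+    /// `LogReplayProcessor` and `schema` is a commit read schema (e.g. `COMMIT_READ_SCHEMA`):
+    ///
+    /// ```ignore
+    /// let reader = CommitReader::try_new(engine.as_ref(), &log_segment, schema)?;
+    /// for batch in reader {
+    ///     let output = processor.process_actions_batch(batch?)?;
+    ///     // consume `output`...
+    /// }
+    /// ```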
+ pub(crate) fn try_new(
+ engine: &dyn Engine,
+ log_segment: &LogSegment,
+ schema: SchemaRef,
+    ) -> DeltaResult<Self> {
+ let commit_files = log_segment.find_commit_cover();
+ let actions = engine
+ .json_handler()
+ .read_json_files(&commit_files, schema, None)?
+ .map(|batch| batch.map(|b| ActionsBatch::new(b, true)));
+
+ Ok(Self {
+ actions: Box::new(actions),
+ })
+ }
+}
+
+impl Iterator for CommitReader {
+    type Item = DeltaResult<ActionsBatch>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+ self.actions.next()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::engine::default::executor::tokio::TokioBackgroundExecutor;
+ use crate::engine::default::DefaultEngine;
+ use crate::log_replay::LogReplayProcessor;
+ use crate::scan::log_replay::ScanLogReplayProcessor;
+ use crate::scan::state_info::StateInfo;
+ use crate::scan::COMMIT_READ_SCHEMA;
+ use object_store::local::LocalFileSystem;
+ use std::path::PathBuf;
+ use std::sync::Arc as StdArc;
+
+ fn load_test_table(
+ table_name: &str,
+ ) -> DeltaResult<(
+        StdArc<DefaultEngine<TokioBackgroundExecutor>>,
+        StdArc<crate::Snapshot>,
+ url::Url,
+ )> {
+ let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ path.push("tests/data");
+ path.push(table_name);
+
+ let path = std::fs::canonicalize(path)
+ .map_err(|e| crate::Error::Generic(format!("Failed to canonicalize path: {}", e)))?;
+
+ let url = url::Url::from_directory_path(path)
+ .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?;
+
+ let store = StdArc::new(LocalFileSystem::new());
+ let engine = StdArc::new(DefaultEngine::new(store));
+ let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?;
+
+ Ok((engine, snapshot, url))
+ }
+
+ #[test]
+ fn test_commit_phase_processes_commits() -> DeltaResult<()> {
+ let (engine, snapshot, _url) = load_test_table("table-without-dv-small")?;
+ let log_segment = StdArc::new(snapshot.log_segment().clone());
+
+ let state_info = StdArc::new(StateInfo::try_new(
+ snapshot.schema(),
+ snapshot.table_configuration(),
+ None,
+ (),
+ )?);
+
+ let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?;
+ let schema = COMMIT_READ_SCHEMA.clone();
+        let commit_phase = CommitReader::try_new(engine.as_ref(), &log_segment, schema)?;
+
+ let mut batch_count = 0;
+ let mut file_paths = Vec::new();
+
+ for result in commit_phase {
+ let batch = result?;
+ let metadata = processor.process_actions_batch(batch)?;
+ let paths = metadata.visit_scan_files(
+ vec![],
+                |ps: &mut Vec<String>, path, _, _, _, _, _| {
+ ps.push(path.to_string());
+ },
+ )?;
+ file_paths.extend(paths);
+ batch_count += 1;
+ }
+
+ // table-without-dv-small has exactly 1 commit file
+ assert_eq!(
+ batch_count, 1,
+ "table-without-dv-small should have exactly 1 commit batch"
+ );
+
+ // table-without-dv-small has exactly 1 add file
+ file_paths.sort();
+ let expected_files =
+ vec!["part-00000-517f5d32-9c95-48e8-82b4-0229cc194867-c000.snappy.parquet"];
+ assert_eq!(
+ file_paths, expected_files,
+ "CommitReader should find exactly the expected file"
+ );
+
+ Ok(())
+ }
+}
diff --git a/kernel/src/log_reader/leaf.rs b/kernel/src/log_reader/leaf.rs
new file mode 100644
index 000000000..651f18728
--- /dev/null
+++ b/kernel/src/log_reader/leaf.rs
@@ -0,0 +1,183 @@
+//! Sidecar phase for log replay - processes sidecar/leaf parquet files.
+
+use std::sync::Arc;
+
+use crate::log_replay::ActionsBatch;
+use crate::schema::SchemaRef;
+use crate::{DeltaResult, Engine, FileMeta};
+
+/// Phase that processes sidecar or leaf parquet files.
+///
+/// This phase is distributable - you can partition `files` and create multiple
+/// instances on different executors.
+#[allow(unused)]
+pub(crate) struct LeafCheckpointReader {
+    actions: Box<dyn Iterator<Item = DeltaResult<ActionsBatch>> + Send>,
+}
+
+impl LeafCheckpointReader {
+    /// Create a new sidecar phase from a list of files.
+ ///
+ /// # Distributability
+ ///
+ /// This phase is designed to be distributable. To distribute:
+ /// 1. Partition `files` across N executors
+ /// 2. Create N `LeafCheckpointReader` instances, one per executor with its file partition
+ ///
+ /// # Parameters
+ /// - `files`: Sidecar/leaf files to process
+ /// - `engine`: Engine for reading files
+ /// - `schema`: Schema to use when reading sidecar files (projected based on processor requirements)
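+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of the partitioning described above. `num_executors` and the
+    /// transport are hypothetical; only the chunking and per-executor construction
+    /// are the point:
+    ///
+    /// ```ignore
+    /// let chunk_size = files.len().div_ceil(num_executors);
+    /// for partition in files.chunks(chunk_size) {
+    ///     // Ship `partition` to an executor, which then builds its own reader:
+    ///     let reader = LeafCheckpointReader::new(partition.to_vec(), engine.clone(), schema.clone())?;
+    ///     // ...and drains it through its deserialized processor.
+    /// }
+    /// ```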
+ #[allow(unused)]
+ pub(crate) fn new(
+        files: Vec<FileMeta>,
+        engine: Arc<dyn Engine>,
+        schema: SchemaRef,
+    ) -> DeltaResult<Self> {
+ let actions = engine
+ .parquet_handler()
+ .read_parquet_files(&files, schema, None)?
+ .map(|batch| batch.map(|b| ActionsBatch::new(b, false)));
+
+ Ok(Self {
+ actions: Box::new(actions),
+ })
+ }
+}
+
+impl Iterator for LeafCheckpointReader {
+    type Item = DeltaResult<ActionsBatch>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+ self.actions.next()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::actions::{get_commit_schema, ADD_NAME};
+ use crate::engine::default::DefaultEngine;
+ use crate::log_reader::manifest::{AfterManifest, ManifestPhase};
+ use crate::log_replay::LogReplayProcessor;
+ use crate::scan::log_replay::ScanLogReplayProcessor;
+ use crate::scan::state_info::StateInfo;
+ use crate::{Error, Snapshot, SnapshotRef};
+ use object_store::local::LocalFileSystem;
+ use std::sync::Arc;
+ use tempfile::TempDir;
+ use url::Url;
+
+ fn load_test_table(
+ table_name: &str,
+    ) -> DeltaResult<(Arc<dyn Engine>, SnapshotRef, Url, TempDir)> {
+ let test_dir = test_utils::load_test_data("tests/data", table_name)
+ .map_err(|e| Error::generic(format!("Failed to load test data: {}", e)))?;
+ let test_path = test_dir.path().join(table_name);
+
+ let url = url::Url::from_directory_path(&test_path)
+ .map_err(|_| Error::generic("Failed to create URL from path"))?;
+
+ let store = Arc::new(LocalFileSystem::new());
+        let engine: Arc<dyn Engine> = Arc::new(DefaultEngine::new(store));
+ let snapshot = Snapshot::builder_for(url.clone()).build(engine.as_ref())?;
+
+ Ok((engine, snapshot, url, test_dir))
+ }
+
+ #[test]
+ fn test_sidecar_phase_processes_files() -> DeltaResult<()> {
+ let (engine, snapshot, _table_root, _tempdir) =
+ load_test_table("v2-checkpoints-json-with-sidecars")?;
+ let log_segment = snapshot.log_segment();
+
+ let state_info = Arc::new(StateInfo::try_new(
+ snapshot.schema(),
+ snapshot.table_configuration(),
+ None,
+ (),
+ )?);
+
+ let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?;
+
+ // First we need to run through manifest phase to get the sidecar files
+ if log_segment.checkpoint_parts.is_empty() {
+ println!("Test table has no checkpoint parts, skipping");
+ return Ok(());
+ }
+
+ // Get the first checkpoint part
+ let checkpoint_file = &log_segment.checkpoint_parts[0];
+ let manifest_file = checkpoint_file.location.clone();
+
+ let mut manifest_phase =
+ ManifestPhase::new(manifest_file, log_segment.log_root.clone(), engine.clone())?;
+
+ // Drain manifest phase and apply processor
+ for batch in manifest_phase.by_ref() {
+ let batch = batch?;
+ processor.process_actions_batch(batch)?;
+ }
+
+ let after_manifest = manifest_phase.finalize()?;
+
+ match after_manifest {
+ AfterManifest::Sidecars { sidecars } => {
+ println!("Testing with {} sidecar files", sidecars.len());
+
+ let schema = get_commit_schema().project(&[ADD_NAME])?;
+
+                let sidecar_phase =
+                    LeafCheckpointReader::new(sidecars, engine.clone(), schema)?;
+
+ let mut sidecar_file_paths = Vec::new();
+ let mut batch_count = 0;
+
+                for result in sidecar_phase {
+ let batch = result?;
+ let metadata = processor.process_actions_batch(batch)?;
+ let paths = metadata.visit_scan_files(
+ vec![],
+                        |ps: &mut Vec<String>, path, _, _, _, _, _| {
+ ps.push(path.to_string());
+ },
+ )?;
+ sidecar_file_paths.extend(paths);
+ batch_count += 1;
+ }
+
+ sidecar_file_paths.sort();
+
+ // v2-checkpoints-json-with-sidecars has exactly 2 sidecar files with 101 total files
+ assert_eq!(
+ batch_count, 2,
+ "LeafCheckpointReader should process exactly 2 sidecar batches"
+ );
+
+ assert_eq!(
+ sidecar_file_paths.len(),
+ 101,
+ "LeafCheckpointReader should find exactly 101 files from sidecars"
+ );
+
+ // Verify first few files match expected (sampling to keep test readable)
+ let expected_first_files = vec![
+ "test%25file%25prefix-part-00000-01086c52-1b86-48d0-8889-517fe626849d-c000.snappy.parquet",
+ "test%25file%25prefix-part-00000-0fd71c0e-fd08-4685-87d6-aae77532d3ea-c000.snappy.parquet",
+ "test%25file%25prefix-part-00000-2710dd7f-9fa5-429d-b3fb-c005ba16e062-c000.snappy.parquet",
+ ];
+
+ assert_eq!(
+ &sidecar_file_paths[..3],
+ &expected_first_files[..],
+ "LeafCheckpointReader should process files in expected order"
+ );
+ }
+ AfterManifest::Done => {
+ println!("No sidecars found - test inconclusive");
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/kernel/src/log_reader/manifest.rs b/kernel/src/log_reader/manifest.rs
new file mode 100644
index 000000000..cfd133780
--- /dev/null
+++ b/kernel/src/log_reader/manifest.rs
@@ -0,0 +1,289 @@
+//! Manifest phase for log replay - processes single-part checkpoint manifest files.
+
+use std::sync::{Arc, LazyLock};
+
+use itertools::Itertools;
+use url::Url;
+
+use crate::actions::visitors::SidecarVisitor;
+use crate::actions::{Add, Sidecar, ADD_NAME, SIDECAR_NAME};
+use crate::log_replay::ActionsBatch;
+use crate::schema::{SchemaRef, StructField, StructType, ToSchema};
+use crate::utils::require;
+use crate::{DeltaResult, Engine, Error, FileMeta, RowVisitor};
+
+/// Phase that processes single-part checkpoint manifest files.
+///
+/// Extracts sidecar references while processing the manifest.
+#[allow(unused)]
+pub(crate) struct ManifestPhase {
+    actions: Box<dyn Iterator<Item = DeltaResult<ActionsBatch>> + Send>,
+ sidecar_visitor: SidecarVisitor,
+ manifest_file: FileMeta,
+ log_root: Url,
+ is_complete: bool,
+}
+
+/// Possible transitions after ManifestPhase completes.
+#[allow(unused)]
+pub(crate) enum AfterManifest {
+ /// Has sidecars → return sidecar files
+    Sidecars { sidecars: Vec<FileMeta> },
+ /// No sidecars
+ Done,
+}
+
+impl ManifestPhase {
+ /// Create a new manifest phase for a single-part checkpoint.
+ ///
+    /// The read schema always includes the sidecar column, since the manifest
+    /// phase needs to extract sidecar references for phase transitions.
+ ///
+ /// # Parameters
+ /// - `manifest_file`: The checkpoint manifest file to process
+ /// - `log_root`: Root URL for resolving sidecar paths
+ /// - `engine`: Engine for reading files
+ #[allow(unused)]
+ pub(crate) fn new(
+ manifest_file: FileMeta,
+ log_root: Url,
+        engine: Arc<dyn Engine>,
+    ) -> DeltaResult<Self> {
+        static MANIFEST_READ_SCHEMA: LazyLock<SchemaRef> = LazyLock::new(|| {
+ Arc::new(StructType::new_unchecked([
+ StructField::nullable(ADD_NAME, Add::to_schema()),
+ StructField::nullable(SIDECAR_NAME, Sidecar::to_schema()),
+ ]))
+ });
+
+ let files = vec![manifest_file.clone()];
+
+ // Determine file type from extension
+ let extension = manifest_file
+ .location
+ .path()
+ .rsplit('.')
+ .next()
+ .unwrap_or("");
+
+ let actions = match extension {
+ "json" => {
+ engine
+ .json_handler()
+                    .read_json_files(&files, MANIFEST_READ_SCHEMA.clone(), None)?
+ }
+ "parquet" => engine.parquet_handler().read_parquet_files(
+ &files,
+                MANIFEST_READ_SCHEMA.clone(),
+ None,
+ )?,
+ ext => {
+ return Err(Error::generic(format!(
+ "Unsupported checkpoint extension: {}",
+ ext
+ )))
+ }
+ };
+
+ let actions = actions.map(|batch| batch.map(|b| ActionsBatch::new(b, false)));
+
+ Ok(Self {
+ actions: Box::new(actions),
+ sidecar_visitor: SidecarVisitor::default(),
+ log_root,
+ manifest_file,
+ is_complete: false,
+ })
+ }
+
+ /// Transition to the next phase.
+ ///
+ /// Returns an enum indicating what comes next:
+ /// - `Sidecars`: Extracted sidecar files
+ /// - `Done`: No sidecars found
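+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (mirroring the unit tests below):
+    ///
+    /// ```ignore
+    /// let mut phase = ManifestPhase::new(manifest_file, log_root, engine)?;
+    /// for batch in phase.by_ref() {
+    ///     processor.process_actions_batch(batch?)?;
+    /// }
+    /// match phase.finalize()? {
+    ///     AfterManifest::Sidecars { sidecars } => { /* distribute sidecar files */ }
+    ///     AfterManifest::Done => { /* nothing left to read */ }
+    /// }
+    /// ```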
+ #[allow(unused)]
+    pub(crate) fn finalize(self) -> DeltaResult<AfterManifest> {
+ require!(
+ self.is_complete,
+ Error::generic(format!(
+ "Finalized called on ManifestReader for file {:?}",
+ self.manifest_file.location
+ ))
+ );
+
+ let sidecars: Vec<_> = self
+ .sidecar_visitor
+ .sidecars
+ .into_iter()
+ .map(|s| s.to_filemeta(&self.log_root))
+ .try_collect()?;
+
+ if sidecars.is_empty() {
+ Ok(AfterManifest::Done)
+ } else {
+ Ok(AfterManifest::Sidecars { sidecars })
+ }
+ }
+}
+
+impl Iterator for ManifestPhase {
+    type Item = DeltaResult<ActionsBatch>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+ let result = self.actions.next().map(|batch_result| {
+ batch_result.and_then(|batch| {
+ self.sidecar_visitor.visit_rows_of(batch.actions())?;
+ Ok(batch)
+ })
+ });
+
+ if result.is_none() {
+ self.is_complete = true;
+ }
+
+ result
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::engine::default::DefaultEngine;
+ use crate::log_replay::LogReplayProcessor;
+ use crate::scan::log_replay::ScanLogReplayProcessor;
+ use crate::scan::state_info::StateInfo;
+ use crate::SnapshotRef;
+ use object_store::local::LocalFileSystem;
+ use std::sync::Arc;
+ use tempfile::TempDir;
+
+ fn load_test_table(
+ table_name: &str,
+    ) -> DeltaResult<(Arc<dyn Engine>, SnapshotRef, Url, TempDir)> {
+ let test_dir = test_utils::load_test_data("tests/data", table_name)
+ .map_err(|e| crate::Error::Generic(format!("Failed to load test data: {}", e)))?;
+ let test_path = test_dir.path().join(table_name);
+
+ let url = url::Url::from_directory_path(&test_path)
+ .map_err(|_| crate::Error::Generic("Failed to create URL from path".to_string()))?;
+
+ let store = Arc::new(LocalFileSystem::new());
+        let engine: Arc<dyn Engine> = Arc::new(DefaultEngine::new(store));
+ let snapshot = crate::Snapshot::builder_for(url.clone()).build(engine.as_ref())?;
+
+ Ok((engine, snapshot, url, test_dir))
+ }
+
+ #[test]
+ fn test_manifest_phase_with_checkpoint() -> DeltaResult<()> {
+ // Use a table with v2 checkpoints where adds might be in sidecars
+        let (engine, snapshot, _table_root, _tempdir) =
+ load_test_table("v2-checkpoints-json-with-sidecars")?;
+ let log_segment = snapshot.log_segment();
+
+ // Check if there are any checkpoint parts
+ if log_segment.checkpoint_parts.is_empty() {
+ println!("Test table has no checkpoint parts, skipping");
+ return Ok(());
+ }
+
+ let state_info = Arc::new(StateInfo::try_new(
+ snapshot.schema(),
+ snapshot.table_configuration(),
+ None,
+ (),
+ )?);
+
+ let mut processor = ScanLogReplayProcessor::new(engine.as_ref(), state_info)?;
+
+ // Get the first checkpoint part
+ let checkpoint_file = &log_segment.checkpoint_parts[0];
+ let manifest_file = checkpoint_file.location.clone();
+
+        let manifest_phase =
+            ManifestPhase::new(manifest_file, log_segment.log_root.clone(), engine.clone())?;
+
+ // Count batches and collect results
+ let mut file_paths = Vec::new();
+
+ for result in manifest_phase {
+ let batch = result?;
+ let metadata = processor.process_actions_batch(batch)?;
+ let paths = metadata.visit_scan_files(
+ vec![],
+                |ps: &mut Vec<String>, path, _, _, _, _, _| {
+ ps.push(path.to_string());
+ },
+ )?;
+ file_paths.extend(paths);
+ }
+ // Verify the manifest itself contains no add files (they're all in sidecars)
+ assert_eq!(
+ file_paths.len(), 0,
+ "For this v2 checkpoint with sidecars, manifest should contain 0 add files (all in sidecars)"
+ );
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_manifest_phase_collects_sidecars() -> DeltaResult<()> {
+        let (engine, snapshot, _table_root, _tempdir) =
+ load_test_table("v2-checkpoints-json-with-sidecars")?;
+ let log_segment = snapshot.log_segment();
+
+ if log_segment.checkpoint_parts.is_empty() {
+ println!("Test table has no checkpoint parts, skipping");
+ return Ok(());
+ }
+
+ let checkpoint_file = &log_segment.checkpoint_parts[0];
+ let manifest_file = checkpoint_file.location.clone();
+
+        let mut manifest_phase =
+            ManifestPhase::new(manifest_file, log_segment.log_root.clone(), engine.clone())?;
+
+ // Drain the phase
+ while manifest_phase.next().is_some() {}
+
+ // Check if sidecars were collected
+ let next = manifest_phase.finalize()?;
+
+ match next {
+ AfterManifest::Sidecars { sidecars } => {
+ // For the v2-checkpoints-json-with-sidecars test table at version 6,
+ // there are exactly 2 sidecar files
+ assert_eq!(
+ sidecars.len(),
+ 2,
+ "Should collect exactly 2 sidecars for checkpoint at version 6"
+ );
+
+ // Extract and verify the sidecar paths
+                let mut collected_paths: Vec<String> = sidecars
+ .iter()
+ .map(|fm| {
+ // Get the filename from the URL path
+ fm.location
+ .path_segments()
+ .and_then(|segments| segments.last())
+ .unwrap_or("")
+ .to_string()
+ })
+ .collect();
+
+ collected_paths.sort();
+
+ // Verify they're the expected sidecar files for version 6
+ assert_eq!(collected_paths[0], "00000000000000000006.checkpoint.0000000001.0000000002.19af1366-a425-47f4-8fa6-8d6865625573.parquet");
+ assert_eq!(collected_paths[1], "00000000000000000006.checkpoint.0000000002.0000000002.5008b69f-aa8a-4a66-9299-0733a56a7e63.parquet");
+ }
+ AfterManifest::Done => {
+ panic!("Expected sidecars for v2-checkpoints-json-with-sidecars table");
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/kernel/src/log_reader/mod.rs b/kernel/src/log_reader/mod.rs
new file mode 100644
index 000000000..986a538a4
--- /dev/null
+++ b/kernel/src/log_reader/mod.rs
@@ -0,0 +1,3 @@
+pub(crate) mod commit;
+pub(crate) mod leaf;
+pub(crate) mod manifest;
diff --git a/kernel/src/log_segment.rs b/kernel/src/log_segment.rs
index 4eb5e1ee7..e6ef5fd41 100644
--- a/kernel/src/log_segment.rs
+++ b/kernel/src/log_segment.rs
@@ -9,6 +9,7 @@ use crate::actions::{
    PROTOCOL_NAME, SIDECAR_NAME,
};
use crate::last_checkpoint_hint::LastCheckpointHint;
+use crate::log_reader::commit::CommitReader;
use crate::log_replay::ActionsBatch;
use crate::path::{LogPathFileType, ParsedLogPath};
use crate::schema::{SchemaRef, StructField, ToSchema as _};
@@ -303,15 +304,7 @@ impl LogSegment {
        meta_predicate: Option<PredicateRef>,
    ) -> DeltaResult<impl Iterator<Item = DeltaResult<ActionsBatch>> + Send> {
        // `replay` expects commit files to be sorted in descending order, so the return value here is correct
-        let commits_and_compactions = self.find_commit_cover();
-        let commit_stream = engine
-            .json_handler()
-            .read_json_files(
-                &commits_and_compactions,
-                commit_read_schema,
-                meta_predicate.clone(),
-            )?
-            .map_ok(|batch| ActionsBatch::new(batch, true));
+        let commit_stream = CommitReader::try_new(engine, self, commit_read_schema)?;
        let checkpoint_stream =
            self.create_checkpoint_stream(engine, checkpoint_read_schema, meta_predicate)?;
@@ -340,7 +333,7 @@ impl LogSegment {
    /// returns files in DESCENDING ORDER, as that's what `replay` expects. This function assumes
    /// that all files in `self.ascending_commit_files` and `self.ascending_compaction_files` are in
    /// range for this log segment. This invariant is maintained by our listing code.
-    fn find_commit_cover(&self) -> Vec<FileMeta> {
+    pub(crate) fn find_commit_cover(&self) -> Vec<FileMeta> {
        // Create an iterator sorted in ascending order by (initial version, end version), e.g.
        // [00.json, 00.09.compacted.json, 00.99.compacted.json, 01.json, 02.json, ..., 10.json,
        // 10.19.compacted.json, 11.json, ...]
diff --git a/kernel/src/scan/log_replay.rs b/kernel/src/scan/log_replay.rs
index 7bf69eb72..e0fe51b3f 100644
--- a/kernel/src/scan/log_replay.rs
+++ b/kernel/src/scan/log_replay.rs
@@ -53,7 +53,7 @@ pub(crate) struct ScanLogReplayProcessor {
impl ScanLogReplayProcessor {
    /// Create a new [`ScanLogReplayProcessor`] instance
-    fn new(engine: &dyn Engine, state_info: Arc<StateInfo>) -> DeltaResult<Self> {
+    pub(crate) fn new(engine: &dyn Engine, state_info: Arc<StateInfo>) -> DeltaResult<Self> {
        // Extract the physical predicate from StateInfo's PhysicalPredicate enum.
        // The DataSkippingFilter and partition_filter components expect the predicate
        // in the format Option<(PredicateRef, SchemaRef)>, so we need to convert from
diff --git a/kernel/src/scan/mod.rs b/kernel/src/scan/mod.rs
index c571c346f..4fcb3af41 100644
--- a/kernel/src/scan/mod.rs
+++ b/kernel/src/scan/mod.rs
@@ -13,7 +13,7 @@ use self::log_replay::get_scan_metadata_transform_expr;
use crate::actions::deletion_vector::{
deletion_treemap_to_bools, split_vector, DeletionVectorDescriptor,
};
-use crate::actions::{get_commit_schema, ADD_NAME, REMOVE_NAME};
+use crate::actions::{get_commit_schema, ADD_NAME, REMOVE_NAME, SIDECAR_NAME};
use crate::engine_data::FilteredEngineData;
use crate::expressions::transforms::ExpressionTransform;
use crate::expressions::{ColumnName, ExpressionRef, Predicate, PredicateRef, Scalar};
@@ -41,7 +41,7 @@ pub(crate) mod state_info;
// safety: we define get_commit_schema() and _know_ it contains ADD_NAME and REMOVE_NAME
#[allow(clippy::unwrap_used)]
-static COMMIT_READ_SCHEMA: LazyLock<SchemaRef> = LazyLock::new(|| {
+pub(crate) static COMMIT_READ_SCHEMA: LazyLock<SchemaRef> = LazyLock::new(|| {
    get_commit_schema()
        .project(&[ADD_NAME, REMOVE_NAME])
        .unwrap()