diff --git a/crates/store/re_log_encoding/src/rrd/decoder/iterator.rs b/crates/store/re_log_encoding/src/rrd/decoder/iterator.rs index f1b3cc1f8b70..ce3b9c1581dd 100644 --- a/crates/store/re_log_encoding/src/rrd/decoder/iterator.rs +++ b/crates/store/re_log_encoding/src/rrd/decoder/iterator.rs @@ -1,3 +1,4 @@ +use crate::RrdManifest; use crate::rrd::decoder::state_machine::DecoderState; use crate::rrd::{DecodeError, Decoder, DecoderEntrypoint}; @@ -109,6 +110,18 @@ impl DecoderIterator { } } +impl DecoderIterator { + /// Returns all the RRD manifests accumulated _so far_. + /// + /// RRD manifests are parsed from footers, of which there might be more than one e.g. in the + /// case of concatenated streams. + /// + /// This is not cheap: it automatically performs the transport to app level conversion. + pub fn rrd_manifests(&self) -> Result, DecodeError> { + self.decoder.rrd_manifests() + } +} + impl std::iter::Iterator for DecoderIterator { type Item = Result; diff --git a/crates/store/re_log_encoding/src/rrd/decoder/state_machine.rs b/crates/store/re_log_encoding/src/rrd/decoder/state_machine.rs index 058f92157927..0ce5bd84318b 100644 --- a/crates/store/re_log_encoding/src/rrd/decoder/state_machine.rs +++ b/crates/store/re_log_encoding/src/rrd/decoder/state_machine.rs @@ -9,7 +9,7 @@ use crate::StreamFooter; use crate::rrd::MessageHeader; use crate::{ CachingApplicationIdInjector, CodecError, Decodable as _, DecodeError, DecoderEntrypoint, - EncodingOptions, Serializer, StreamHeader, + EncodingOptions, RrdManifest, Serializer, StreamHeader, ToApplication as _, }; // --- @@ -56,6 +56,11 @@ pub struct Decoder { /// The application id cache used for migrating old data. pub(crate) app_id_cache: CachingApplicationIdInjector, + /// All the RRD manifests accumulated so far by parsing incoming footers in the RRD stream. + /// + /// Transport-level types to keep decoding cheap. + rrd_manifests: Vec, + _decodable: std::marker::PhantomData, } @@ -120,6 +125,7 @@ impl Decoder { byte_chunks: ByteChunkBuffer::new(), state: DecoderState::WaitingForStreamHeader, app_id_cache: CachingApplicationIdInjector::default(), + rrd_manifests: Vec::new(), _decodable: std::marker::PhantomData::, } } @@ -129,6 +135,20 @@ impl Decoder { self.byte_chunks.push(byte_chunk); } + /// Returns all the RRD manifests accumulated _so far_. + /// + /// RRD manifests are parsed from footers, of which there might be more than one e.g. in the + /// case of concatenated streams. + /// + /// This is not cheap: it automatically performs the transport to app level conversion. + pub fn rrd_manifests(&self) -> Result, DecodeError> { + re_tracing::profile_function!(); + self.rrd_manifests + .iter() + .map(|m| m.to_application(()).map_err(Into::into)) + .collect() + } + /// Read the next message in the stream, dropping messages missing application id that cannot /// be migrated (because they arrived before `SetStoreInfo`). pub fn try_read(&mut self) -> Result, DecodeError> { @@ -301,7 +321,7 @@ impl Decoder { if !bytes.is_empty() { let rrd_footer = re_protos::log_msg::v1alpha1::RrdFooter::from_rrd_bytes(&bytes)?; - _ = rrd_footer; // TODO(cmc): we'll use that in the next PR, promise. + self.rrd_manifests.extend(rrd_footer.manifests); // A non-empty ::End message means there must be a footer ahead, no exception. 
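+                        // Illustrative sketch of how a caller can consume the manifests accumulated
+                        // here, mirroring the `footer_roundtrip` test and the CLI's `stdio.rs`;
+                        // `reader` and `encoded` are assumed stand-ins for the caller's input source
+                        // and its raw bytes, not names from this crate:
+                        //
+                        //     let mut decoder = DecoderApp::decode_lazy(reader);
+                        //     for msg in &mut decoder { /* stream the messages as usual */ }
+                        //     for manifest in decoder.rrd_manifests()? {
+                        //         // Every manifest row points back into the raw encoded stream:
+                        //         for (offset, size) in itertools::izip!(
+                        //             manifest.col_chunk_byte_offset()?,
+                        //             manifest.col_chunk_byte_size()?,
+                        //         ) {
+                        //             let buf = &encoded[offset as usize..(offset + size) as usize];
+                        //             let arrow_msg = re_protos::log_msg::v1alpha1::ArrowMsg::decode(buf)?;
+                        //             // `.to_application(())` then yields the app-level `ArrowMsg`.
+                        //         }
+                        //     }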
self.state = DecoderState::WaitingForStreamFooter; diff --git a/crates/store/re_log_encoding/src/rrd/decoder/stream.rs b/crates/store/re_log_encoding/src/rrd/decoder/stream.rs index e4b05a408f78..ef4ed0b87dca 100644 --- a/crates/store/re_log_encoding/src/rrd/decoder/stream.rs +++ b/crates/store/re_log_encoding/src/rrd/decoder/stream.rs @@ -3,7 +3,10 @@ use std::pin::Pin; use tokio::io::AsyncBufRead; use tokio_stream::{Stream, StreamExt as _}; -use crate::rrd::{DecodeError, Decoder, DecoderEntrypoint, decoder::state_machine::DecoderState}; +use crate::{ + RrdManifest, + rrd::{DecodeError, Decoder, DecoderEntrypoint, decoder::state_machine::DecoderState}, +}; // --- @@ -107,6 +110,18 @@ pub struct DecoderStream { pub first_msg: Option, } +impl DecoderStream { + /// Returns all the RRD manifests accumulated _so far_. + /// + /// RRD manifests are parsed from footers, of which there might be more than one e.g. in the + /// case of concatenated streams. + /// + /// This is not cheap: it automatically performs the transport to app level conversion. + pub fn rrd_manifests(&self) -> Result, DecodeError> { + self.decoder.rrd_manifests() + } +} + // NOTE: This is the exact same implementation as `impl Iterator for DecoderIterator`, just asyncified. impl Stream for DecoderStream { type Item = Result; diff --git a/crates/store/re_log_encoding/src/rrd/encoder.rs b/crates/store/re_log_encoding/src/rrd/encoder.rs index 4013d85c194b..b42d0414b38d 100644 --- a/crates/store/re_log_encoding/src/rrd/encoder.rs +++ b/crates/store/re_log_encoding/src/rrd/encoder.rs @@ -1,15 +1,15 @@ //! Encoding of [`LogMsg`]es as a binary stream, e.g. to store in an `.rrd` file, or send over network. -use std::borrow::Borrow; +use std::{borrow::Borrow, collections::HashMap}; use re_build_info::CrateVersion; use re_chunk::{ChunkError, ChunkResult}; -use re_log_types::LogMsg; +use re_log_types::{LogMsg, StoreId}; use re_sorbet::SorbetError; use crate::{ CodecError, Compression, Encodable as _, EncodingOptions, MessageHeader, MessageKind, - Serializer, StreamFooter, StreamHeader, ToTransport as _, + RrdManifestBuilder, Serializer, StreamFooter, StreamHeader, ToTransport as _, }; // ---------------------------------------------------------------------------- @@ -103,13 +103,28 @@ struct FooterState { /// want to override the recording ID of each chunk with that one (because that's the existing /// behavior, certainly not because it's nice). recording_id_scope: Option, + + manifests: HashMap, +} + +/// The accumulated state for a specific RRD manifest. +#[derive(Default)] +struct ManifestState { + /// The accumulated recording IDs of each individual chunk, extracted from their `LogMsg`. + /// + /// In most normal scenarios, this will just be the same value repeated N times. + /// + /// This will only be used if [`FooterState::recording_id_scope`] is empty. + recording_ids: Vec, + + /// The state of the RRD manifest currently being built. 
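+    /// A sketch of the builder flow (the names mirror `RrdManifestBuilder`'s API as used by the
+    /// `simple_manifest` test and by `FooterState::finish` below; the surrounding variables are
+    /// assumptions):
+    ///
+    /// ```text
+    /// let mut builder = RrdManifestBuilder::default();
+    /// // Once per encoded chunk, with its byte span in the stream (headers excluded):
+    /// builder.append(&chunk_batch, re_span::Span { start, len })?;
+    /// let manifest = builder.build(store_id)?;
+    /// ```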
+ manifest: RrdManifestBuilder, } impl FooterState { - #[expect(clippy::unnecessary_wraps)] // won't stay for long fn append( &mut self, - _byte_span_excluding_header: re_span::Span, + byte_span_excluding_header: re_span::Span, msg: &re_log_types::LogMsg, ) -> Result<(), EncodeError> { match msg { @@ -117,16 +132,58 @@ impl FooterState { self.recording_id_scope = Some(msg.info.store_id.clone()); } - LogMsg::ArrowMsg(_, _) | LogMsg::BlueprintActivationCommand(_) => {} + LogMsg::ArrowMsg(store_id, msg) => { + // NOTE(1): The fact that this parses the `RecordBatch` back into an actual `Chunk` + // is a bit unfortunate, but really it's nowhere near as bad as one might think: + // the real costly work is to parse the IPC payload into a `RecordBatch` in the + // first place, but thankfully we don't have to repay that cost here. + // Not only that: keep in mind that this entire codepath is only taken when writing + // actual RRD files, so performance is generally IO bound anyway. + // + // NOTE(2): The fact that we also perform a Sorbet migration in the process is a + // bit weirder on the other hand, but then again this is generally not a new + // problem: we tend to perform Sorbet migrations a bit too aggressively all over + // the place. We really need a layer that sits between the transport and + // application layer where one can access the parsed, unmigrated data. + let chunk_batch = re_sorbet::ChunkBatch::try_from(&msg.batch)?; + + // See `self.recording_id_scope` for some explanations. + let recording_id = self + .recording_id_scope + .clone() + .unwrap_or_else(|| store_id.clone()); + + // This line is important: it implies that if a recording doesn't have any data + // chunks at all, we do not even reserve an RRD manifest for it in the footer.
+ let ManifestState { + recording_ids, + manifest, + } = self.manifests.entry(recording_id.clone()).or_default(); + + recording_ids.push(recording_id); + manifest.append(&chunk_batch, byte_span_excluding_header)?; + } + + LogMsg::BlueprintActivationCommand(_) => {} } Ok(()) } - #[expect(clippy::unnecessary_wraps, clippy::unused_self)] // won't stay for long fn finish(self) -> Result { + let manifests: Result, _> = self + .manifests + .into_iter() + .map(|(store_id, state)| { + state + .manifest + .build(store_id.clone()) + .map(|m| (store_id, m)) + }) + .collect(); + Ok(crate::RrdFooter { - manifests: Default::default(), + manifests: manifests?, }) } } diff --git a/crates/store/re_log_encoding/src/rrd/footer/builders.rs b/crates/store/re_log_encoding/src/rrd/footer/builders.rs index 6cd4275579a7..6704c675058a 100644 --- a/crates/store/re_log_encoding/src/rrd/footer/builders.rs +++ b/crates/store/re_log_encoding/src/rrd/footer/builders.rs @@ -71,8 +71,7 @@ impl RrdManifestBuilder { pub fn append( &mut self, chunk_batch: &re_sorbet::ChunkBatch, - byte_offset_excluding_header: u64, - byte_size_excluding_header: u64, + byte_span_excluding_header: re_span::Span, ) -> CodecResult<()> { self.sorbet_schema.add_chunk(chunk_batch); @@ -81,9 +80,9 @@ impl RrdManifestBuilder { self.column_chunk_ids.push(chunk.id()); self.column_chunk_is_static.push(chunk.is_static()); self.column_byte_offsets_excluding_headers - .push(byte_offset_excluding_header); + .push(byte_span_excluding_header.start); self.column_byte_sizes_excluding_headers - .push(byte_size_excluding_header); + .push(byte_span_excluding_header.len); self.column_entity_paths.push(chunk.entity_path().clone()); if chunk.is_static() { diff --git a/crates/store/re_log_encoding/src/rrd/footer/instances.rs b/crates/store/re_log_encoding/src/rrd/footer/instances.rs index 95184b4eadee..1166f4e58709 100644 --- a/crates/store/re_log_encoding/src/rrd/footer/instances.rs +++ b/crates/store/re_log_encoding/src/rrd/footer/instances.rs @@ -441,9 +441,13 @@ impl RrdManifest { .collect(); for sorbet_index in &sorbet_indexes { + // We must account for the fact that names in the Sorbet schema are not normalized yet. + let sorbet_index_name_normalized = + Self::compute_column_name(None, None, None, Some(sorbet_index.name()), None); + // All global indexes should have :start and :end columns of the right type. for suffix in ["start", "end"] { - let field = rrd_manifest_fields.remove(&format!("{}:{suffix}", sorbet_index.name())) + let field = rrd_manifest_fields.remove(&format!("{sorbet_index_name_normalized}:{suffix}")) .ok_or_else(|| { crate::CodecError::from(ChunkError::Malformed { reason: format!( diff --git a/crates/store/re_log_encoding/src/rrd/log_msg.rs b/crates/store/re_log_encoding/src/rrd/log_msg.rs index 105c5c350815..5c8d5ee0df48 100644 --- a/crates/store/re_log_encoding/src/rrd/log_msg.rs +++ b/crates/store/re_log_encoding/src/rrd/log_msg.rs @@ -60,6 +60,16 @@ impl Encodable for re_protos::log_msg::v1alpha1::RrdFooter { } } +impl Encodable for re_protos::log_msg::v1alpha1::RrdManifest { + fn to_rrd_bytes(&self, out: &mut Vec) -> Result { + use re_protos::external::prost::Message as _; + + let before = out.len() as u64; + self.encode(out)?; + Ok(out.len() as u64 - before) + } +} + // NOTE: This is implemented for `Option<_>` because, in the native RRD protocol, the message kind // might be `MessageKind::End` (signifying end-of-stream), which has no representation in our Protobuf // definitions. I.e. `MessageKind::End` yields `None`. 
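+// A hedged roundtrip sketch for the `RrdManifest` `Encodable`/`Decodable` impls added in this
+// file; this is an assumed property of the prost passthrough, not something asserted by this
+// change itself:
+//
+//     let mut buf = Vec::new();
+//     let num_bytes = manifest.to_rrd_bytes(&mut buf)?;
+//     assert_eq!(num_bytes as usize, buf.len());
+//     let decoded = re_protos::log_msg::v1alpha1::RrdManifest::from_rrd_bytes(&buf)?;
+//     assert_eq!(manifest, decoded);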
@@ -122,3 +132,10 @@ impl Decodable for re_protos::log_msg::v1alpha1::RrdFooter { Ok(Self::decode(data)?) } } + +impl Decodable for re_protos::log_msg::v1alpha1::RrdManifest { + fn from_rrd_bytes(data: &[u8]) -> Result { + use re_protos::external::prost::Message as _; + Ok(Self::decode(data)?) + } +} diff --git a/crates/store/re_log_encoding/tests/footers_and_manifests.rs b/crates/store/re_log_encoding/tests/footers_and_manifests.rs index 9db578b95b11..a4d7bd8b06d1 100644 --- a/crates/store/re_log_encoding/tests/footers_and_manifests.rs +++ b/crates/store/re_log_encoding/tests/footers_and_manifests.rs @@ -1,9 +1,14 @@ #![expect(clippy::unwrap_used)] +use itertools::Itertools as _; + use re_arrow_util::RecordBatchTestExt as _; use re_chunk::{Chunk, ChunkId, RowId, TimePoint}; -use re_log_encoding::{Decodable as _, Encoder, RrdManifestBuilder, ToApplication as _}; -use re_log_types::{LogMsg, StoreId, build_log_time, external::re_tuid::Tuid}; +use re_log_encoding::{ + Decodable as _, DecoderApp, Encoder, RrdManifest, RrdManifestBuilder, ToApplication as _, +}; +use re_log_types::{ArrowMsg, LogMsg, StoreId, StoreKind, build_log_time, external::re_tuid::Tuid}; +use re_protos::external::prost::Message as _; #[test] fn simple_manifest() { @@ -14,8 +19,12 @@ fn simple_manifest() { let chunk_batch = re_sorbet::ChunkBatch::try_from(&msg.batch).unwrap(); let chunk_byte_size = chunk_batch.heap_size_bytes().unwrap(); + let chunk_byte_span_excluding_header = re_span::Span { + start: byte_offset_excluding_header, + len: chunk_byte_size, + }; builder - .append(&chunk_batch, byte_offset_excluding_header, chunk_byte_size) + .append(&chunk_batch, chunk_byte_span_excluding_header) .unwrap(); byte_offset_excluding_header += chunk_byte_size; @@ -33,6 +42,243 @@ fn simple_manifest() { ); } +#[test] +fn footer_roundtrip() { + let msgs_expected_recording = generate_recording(generate_recording_chunks(1)).collect_vec(); + let msgs_expected_blueprint = generate_blueprint(generate_blueprint_chunks(2)).collect_vec(); + + let msgs_encoded = Encoder::encode( + msgs_expected_recording + .clone() + .into_iter() + .map(Ok) + .chain(msgs_expected_blueprint.clone().into_iter().map(Ok)), + ) + .unwrap(); + + let store_id_recording = generate_recording_store_id(); + let store_id_blueprint = generate_blueprint_store_id(); + + let mut decoder = DecoderApp::decode_lazy(msgs_encoded.as_slice()); + let mut msgs_decoded_recording = Vec::new(); + let mut msgs_decoded_blueprint = Vec::new(); + for msg in &mut decoder { + let msg = msg.unwrap(); + match msg { + LogMsg::ArrowMsg(store_id, arrow_msg) => match store_id { + id if id == store_id_recording => msgs_decoded_recording.push(arrow_msg), + id if id == store_id_blueprint => msgs_decoded_blueprint.push(arrow_msg), + _ => unreachable!(), + }, + + LogMsg::SetStoreInfo(_) | LogMsg::BlueprintActivationCommand(_) => {} + } + } + + let stream_footer_start = msgs_encoded + .len() + .checked_sub(re_log_encoding::StreamFooter::ENCODED_SIZE_BYTES) + .unwrap(); + let stream_footer = + re_log_encoding::StreamFooter::from_rrd_bytes(&msgs_encoded[stream_footer_start..]) + .unwrap(); + + let rrd_footer_range = stream_footer + .rrd_footer_byte_span_from_start_excluding_header + .try_cast::() + .unwrap() + .range(); + let rrd_footer_bytes = &msgs_encoded[rrd_footer_range]; + + { + let crc = re_log_encoding::StreamFooter::from_rrd_footer_bytes( + stream_footer + .rrd_footer_byte_span_from_start_excluding_header + .start, + rrd_footer_bytes, + ) + .crc_excluding_header; + 
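+        // The CRC recomputed from the raw footer bytes must match the CRC stored in the
+        // fixed-size stream footer; a cheap integrity check before trusting the offsets below.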
similar_asserts::assert_eq!(stream_footer.crc_excluding_header, crc); + } + + let rrd_footer = + re_protos::log_msg::v1alpha1::RrdFooter::from_rrd_bytes(rrd_footer_bytes).unwrap(); + let mut rrd_footer = rrd_footer.to_application(()).unwrap(); + + let rrd_manifest_recording = rrd_footer.manifests.remove(&store_id_recording).unwrap(); + let rrd_manifest_blueprint = rrd_footer.manifests.remove(&store_id_blueprint).unwrap(); + + fn decode_messages(msgs_encoded: &[u8], rrd_manifest: &RrdManifest) -> Vec { + itertools::izip!( + rrd_manifest.col_chunk_byte_offset().unwrap(), + rrd_manifest.col_chunk_byte_size().unwrap() + ) + .map(|(offset, size)| { + let chunk_start_excluding_header = offset as usize; + let chunk_end_excluding_header = chunk_start_excluding_header + size as usize; + let buf = &msgs_encoded[chunk_start_excluding_header..chunk_end_excluding_header]; + let arrow_msg = re_protos::log_msg::v1alpha1::ArrowMsg::decode(buf).unwrap(); + arrow_msg.to_application(()).unwrap() + }) + .collect() + } + + let (msgs_decoded_recording_from_footer, msgs_decoded_blueprint_from_footer) = ( + decode_messages(&msgs_encoded, &rrd_manifest_recording), + decode_messages(&msgs_encoded, &rrd_manifest_blueprint), + ); + + // Check that the RRD manifests decoded "traditionally" match those obtained via random access / footer. + + let sequential_manifests = decoder.rrd_manifests().unwrap(); + let rrd_manifest_blueprint_sequential = sequential_manifests + .iter() + .find(|m| m.store_id.kind() == StoreKind::Blueprint) + .cloned() + .unwrap(); + let rrd_manifest_recording_sequential = sequential_manifests + .iter() + .find(|m| m.store_id.kind() == StoreKind::Recording) + .cloned() + .unwrap(); + + insta::assert_snapshot!( + "rrd_manifest_blueprint", + rrd_manifest_blueprint_sequential.data.format_snapshot(true), + ); + insta::assert_snapshot!( + "rrd_manifest_blueprint_schema", + rrd_manifest_blueprint_sequential + .data + .format_schema_snapshot(), + ); + insta::assert_snapshot!( + "rrd_manifest_recording", + rrd_manifest_recording_sequential.data.format_snapshot(true), + ); + insta::assert_snapshot!( + "rrd_manifest_recording_schema", + rrd_manifest_recording_sequential + .data + .format_schema_snapshot(), + ); + + similar_asserts::assert_eq!( + rrd_manifest_recording_sequential.data.format_snapshot(true), + rrd_manifest_recording.data.format_snapshot(true), + "RRD manifest decoded sequentially should be identical to the one decoded by jumping via the footer", + ); + // Same test but check everything, not just the manifest data (we do both cause we want a nice diff for the manifest data) + similar_asserts::assert_eq!( + &rrd_manifest_recording_sequential, + &rrd_manifest_recording, + "RRD manifest decoded sequentially should be identical to the one decoded by jumping via the footer", + ); + + similar_asserts::assert_eq!( + rrd_manifest_blueprint_sequential.data.format_snapshot(true), + rrd_manifest_blueprint.data.format_snapshot(true), + "RRD manifest decoded sequentially should be identical to the one decoded by jumping via the footer", + ); + // Same test but check everything, not just the manifest data (we do both cause we want a nice diff for the manifest data) + similar_asserts::assert_eq!( + &rrd_manifest_blueprint_sequential, + &rrd_manifest_blueprint, + "RRD manifest decoded sequentially should be identical to the one decoded by jumping via the footer", + ); + + // Check that the data decoded "traditionally" matches the data decoded via random access / footer. 
+ + similar_asserts::assert_eq!( + msgs_decoded_recording, + msgs_decoded_recording_from_footer, + "chunks decoded sequentially should be identical to those decoded by jumping around using the RRD manifest in the footer", + ); + + similar_asserts::assert_eq!( + msgs_decoded_blueprint, + msgs_decoded_blueprint_from_footer, + "chunks decoded sequentially should be identical to those decoded by jumping around using the RRD manifest in the footer", + ); + + let msgs_reencoded = Encoder::encode( + itertools::chain!( + generate_recording(msgs_decoded_recording_from_footer.into_iter()), + generate_blueprint(msgs_decoded_blueprint_from_footer.into_iter()) + ) + .map(Ok), + ) + .unwrap(); + + // And finally, let's reencode all the messages we decoded back into an RRD stream + { + let reencoded_stream_footer_start = msgs_reencoded + .len() + .checked_sub(re_log_encoding::StreamFooter::ENCODED_SIZE_BYTES) + .unwrap(); + let reencoded_stream_footer = re_log_encoding::StreamFooter::from_rrd_bytes( + &msgs_reencoded[reencoded_stream_footer_start..], + ) + .unwrap(); + + let reencoded_rrd_footer_range = reencoded_stream_footer + .rrd_footer_byte_span_from_start_excluding_header + .try_cast::() + .unwrap() + .range(); + let reencoded_rrd_footer_bytes = &msgs_reencoded[reencoded_rrd_footer_range]; + + { + let crc = re_log_encoding::StreamFooter::from_rrd_footer_bytes( + reencoded_stream_footer + .rrd_footer_byte_span_from_start_excluding_header + .start, + reencoded_rrd_footer_bytes, + ) + .crc_excluding_header; + similar_asserts::assert_eq!(reencoded_stream_footer.crc_excluding_header, crc); + } + + let reencoded_rrd_footer = + re_protos::log_msg::v1alpha1::RrdFooter::from_rrd_bytes(reencoded_rrd_footer_bytes) + .unwrap(); + let mut reencoded_rrd_footer = reencoded_rrd_footer.to_application(()).unwrap(); + + let reencoded_rrd_manifest_recording = reencoded_rrd_footer + .manifests + .remove(&store_id_recording) + .unwrap(); + let reencoded_rrd_manifest_blueprint = reencoded_rrd_footer + .manifests + .remove(&store_id_blueprint) + .unwrap(); + + similar_asserts::assert_eq!( + rrd_manifest_recording.data.format_snapshot(true), + reencoded_rrd_manifest_recording.data.format_snapshot(true), + "Reencoded RRD manifest should be identical to the original one", + ); + // Same test but check everything, not just the manifest data (we do both cause we want a nice diff for the manifest data) + similar_asserts::assert_eq!( + &rrd_manifest_recording, + &reencoded_rrd_manifest_recording, + "Reencoded RRD manifest should be identical to the original one", + ); + + similar_asserts::assert_eq!( + rrd_manifest_blueprint.data.format_snapshot(true), + reencoded_rrd_manifest_blueprint.data.format_snapshot(true), + "Reencoded RRD manifest should be identical to the original one", + ); + // Same test but check everything, not just the manifest data (we do both cause we want a nice diff for the manifest data) + similar_asserts::assert_eq!( + &rrd_manifest_blueprint, + &reencoded_rrd_manifest_blueprint, + "Reencoded RRD manifest should be identical to the original one", + ); + } +} + #[test] fn footer_empty() { fn generate_store_id() -> StoreId { @@ -84,11 +330,39 @@ fn footer_empty() { let rrd_footer = re_protos::log_msg::v1alpha1::RrdFooter::from_rrd_bytes(rrd_footer_bytes).unwrap(); - let _rrd_footer = rrd_footer.to_application(()).unwrap(); + let rrd_footer = rrd_footer.to_application(()).unwrap(); + + // Right now, the implemented behavior is that we end up with an empty footer, i.e. there are + // no manifests in it. 
+ // Whether that's the correct behavior is another question, but at least it is defined for now + // and can be changed. + assert!(rrd_footer.manifests.is_empty()); } // --- +fn generate_recording_store_id() -> StoreId { + StoreId::recording("my_app", "my_recording") +} + +fn generate_recording( + chunks: impl Iterator, +) -> impl Iterator { + let store_id = generate_recording_store_id(); + + std::iter::once(LogMsg::SetStoreInfo(re_log_types::SetStoreInfo { + row_id: *RowId::ZERO, + info: re_log_types::StoreInfo { + store_id: store_id.clone(), + cloned_from: None, + store_source: re_log_types::StoreSource::Unknown, + store_version: Some(re_build_info::CrateVersion::new(1, 2, 3)), + is_partial: false, + }, + })) + .chain(chunks.map(move |chunk| LogMsg::ArrowMsg(store_id.clone(), chunk))) +} + fn generate_recording_chunks(tuid_prefix: u64) -> impl Iterator { use re_log_types::{ TimeInt, TimeType, Timeline, build_frame_nr, @@ -102,7 +376,8 @@ fn generate_recording_chunks(tuid_prefix: u64) -> impl Iterator (Timeline, TimeInt) { ( - Timeline::new("elapsed", TimeType::DurationNs), + // Intentionally bringing some whitespaces into the mix 🫠 + Timeline::new("elapsed time", TimeType::DurationNs), TimeInt::saturated_temporal(value * 1e9 as i64), ) } @@ -176,6 +451,50 @@ fn generate_recording_chunks(tuid_prefix: u64) -> impl Iterator StoreId { + StoreId::new(StoreKind::Blueprint, "my_app", "my_blueprint") +} + +fn generate_blueprint( + chunks: impl Iterator, +) -> impl Iterator { + let store_id = generate_blueprint_store_id(); + + std::iter::once(LogMsg::SetStoreInfo(re_log_types::SetStoreInfo { + row_id: *RowId::ZERO, + info: re_log_types::StoreInfo { + store_id: store_id.clone(), + cloned_from: None, + store_source: re_log_types::StoreSource::Unknown, + store_version: Some(re_build_info::CrateVersion::new(4, 5, 6)), + is_partial: false, + }, + })) + .chain(chunks.map(move |chunk| LogMsg::ArrowMsg(store_id.clone(), chunk))) +} + +fn generate_blueprint_chunks(tuid_prefix: u64) -> impl Iterator { + use re_log_types::{EntityPath, TimeInt, build_frame_nr}; + use re_types::blueprint::archetypes::TimePanelBlueprint; + + let mut next_chunk_id = next_chunk_id_generator(tuid_prefix); + let mut next_row_id = next_row_id_generator(tuid_prefix); + + [ + Chunk::builder_with_id(next_chunk_id(), EntityPath::from("/time_panel")) + .with_archetype( + next_row_id(), + [build_frame_nr(TimeInt::new_temporal(0))], + &TimePanelBlueprint::default().with_fps(60.0), + ) + .build() + .unwrap() + .to_arrow_msg() + .unwrap(), + ] + .into_iter() +} + fn next_chunk_id_generator(prefix: u64) -> impl FnMut() -> ChunkId { let mut chunk_id = ChunkId::from_tuid(Tuid::from_nanos_and_inc(prefix, 0)); move || { diff --git a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_blueprint.snap b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_blueprint.snap new file mode 100644 index 000000000000..eed0b83083e2 --- /dev/null +++ b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_blueprint.snap @@ -0,0 +1,25 @@ +--- +source: crates/store/re_log_encoding/tests/footers_and_manifests.rs +expression: rrd_manifest_blueprint_sequential.data.format_snapshot(true) +--- +┌────────────────────────────────────────┬──────────────────────────────────┐ +│ chunk_entity_path ┆ /time_panel │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_id ┆ 00000000000000020000000000000001 │ 
+├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_is_static ┆ false │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_byte_offset ┆ 2796 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_byte_size ┆ 1101 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ TimePanelBlueprint:fps:has_static_data ┆ false │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:start ┆ 0 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:end ┆ 0 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:TimePanelBlueprint:fps:start ┆ 0 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:TimePanelBlueprint:fps:end ┆ 0 │ +└────────────────────────────────────────┴──────────────────────────────────┘ diff --git a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_blueprint_schema.snap b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_blueprint_schema.snap new file mode 100644 index 000000000000..c4a2f1967800 --- /dev/null +++ b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_blueprint_schema.snap @@ -0,0 +1,33 @@ +--- +source: crates/store/re_log_encoding/tests/footers_and_manifests.rs +expression: rrd_manifest_blueprint_sequential.data.format_schema_snapshot() +--- +TimePanelBlueprint:fps:has_static_data: bool [ + rerun:archetype:rerun.blueprint.archetypes.TimePanelBlueprint + rerun:component:TimePanelBlueprint:fps + rerun:component_type:rerun.blueprint.components.Fps + rerun:index:rerun:static +] +chunk_byte_offset: u64 +chunk_byte_size: u64 +chunk_entity_path: Utf8 +chunk_id: FixedSizeBinary[16] +chunk_is_static: bool +frame_nr:TimePanelBlueprint:fps:end: i64 [ + rerun:archetype:rerun.blueprint.archetypes.TimePanelBlueprint + rerun:component:TimePanelBlueprint:fps + rerun:component_type:rerun.blueprint.components.Fps + rerun:index:frame_nr +] +frame_nr:TimePanelBlueprint:fps:start: i64 [ + rerun:archetype:rerun.blueprint.archetypes.TimePanelBlueprint + rerun:component:TimePanelBlueprint:fps + rerun:component_type:rerun.blueprint.components.Fps + rerun:index:frame_nr +] +frame_nr:end: i64 [ + rerun:index:frame_nr +] +frame_nr:start: i64 [ + rerun:index:frame_nr +] diff --git a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_recording.snap b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_recording.snap new file mode 100644 index 000000000000..25af03244b6c --- /dev/null +++ b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_recording.snap @@ -0,0 +1,57 @@ +--- +source: crates/store/re_log_encoding/tests/footers_and_manifests.rs +expression: rrd_manifest_recording_sequential.data.format_snapshot(true) +--- +┌────────────────────────────────────────────┬──────────────────────────────────┬──────────────────────────────────┐ +│ chunk_entity_path ┆ /my_entity ┆ /my_entity │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_id ┆ 00000000000000010000000000000001 ┆ 00000000000000010000000000000002 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_is_static ┆ false ┆ true │ 
+├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_byte_offset ┆ 104 ┆ 1757 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_byte_size ┆ 1637 ┆ 947 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ example_MyPoints:colors:has_static_data ┆ false ┆ false │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ example_MyPoints:labels:has_static_data ┆ false ┆ true │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ example_MyPoints:points:has_static_data ┆ false ┆ false │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:start ┆ PT10S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:end ┆ PT40S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:start ┆ 10 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:end ┆ 40 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:start ┆ 1970-01-01T00:00:00.000000010 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:end ┆ 1970-01-01T00:00:00.000000040 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:colors:start ┆ PT20S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:colors:end ┆ PT30S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:points:start ┆ PT10S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:points:end ┆ PT40S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:colors:start ┆ 20 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:colors:end ┆ 30 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:points:start ┆ 10 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:points:end ┆ 40 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:example_MyPoints:colors:start ┆ 1970-01-01T00:00:00.000000020 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ 
log_time:example_MyPoints:colors:end ┆ 1970-01-01T00:00:00.000000030 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:example_MyPoints:points:start ┆ 1970-01-01T00:00:00.000000010 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:example_MyPoints:points:end ┆ 1970-01-01T00:00:00.000000040 ┆ null │ +└────────────────────────────────────────────┴──────────────────────────────────┴──────────────────────────────────┘ diff --git a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_recording_schema.snap b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_recording_schema.snap new file mode 100644 index 000000000000..8479f8fa6262 --- /dev/null +++ b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__rrd_manifest_recording_schema.snap @@ -0,0 +1,117 @@ +--- +source: crates/store/re_log_encoding/tests/footers_and_manifests.rs +expression: rrd_manifest_recording_sequential.data.format_schema_snapshot() +--- +chunk_byte_offset: u64 +chunk_byte_size: u64 +chunk_entity_path: Utf8 +chunk_id: FixedSizeBinary[16] +chunk_is_static: bool +elapsed_time:end: Duration(ns) [ + rerun:index:elapsed time +] +elapsed_time:example_MyPoints:colors:end: Duration(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:colors + rerun:component_type:example.MyColor + rerun:index:elapsed time +] +elapsed_time:example_MyPoints:colors:start: Duration(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:colors + rerun:component_type:example.MyColor + rerun:index:elapsed time +] +elapsed_time:example_MyPoints:points:end: Duration(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:points + rerun:component_type:example.MyPoint + rerun:index:elapsed time +] +elapsed_time:example_MyPoints:points:start: Duration(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:points + rerun:component_type:example.MyPoint + rerun:index:elapsed time +] +elapsed_time:start: Duration(ns) [ + rerun:index:elapsed time +] +example_MyPoints:colors:has_static_data: bool [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:colors + rerun:component_type:example.MyColor + rerun:index:rerun:static +] +example_MyPoints:labels:has_static_data: bool [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:labels + rerun:component_type:example.MyLabel + rerun:index:rerun:static +] +example_MyPoints:points:has_static_data: bool [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:points + rerun:component_type:example.MyPoint + rerun:index:rerun:static +] +frame_nr:end: i64 [ + rerun:index:frame_nr +] +frame_nr:example_MyPoints:colors:end: i64 [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:colors + rerun:component_type:example.MyColor + rerun:index:frame_nr +] +frame_nr:example_MyPoints:colors:start: i64 [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:colors + rerun:component_type:example.MyColor + rerun:index:frame_nr +] +frame_nr:example_MyPoints:points:end: i64 [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:points + rerun:component_type:example.MyPoint + rerun:index:frame_nr +] +frame_nr:example_MyPoints:points:start: i64 [ + rerun:archetype:example.MyPoints + 
rerun:component:example.MyPoints:points + rerun:component_type:example.MyPoint + rerun:index:frame_nr +] +frame_nr:start: i64 [ + rerun:index:frame_nr +] +log_time:end: Timestamp(ns) [ + rerun:index:log_time +] +log_time:example_MyPoints:colors:end: Timestamp(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:colors + rerun:component_type:example.MyColor + rerun:index:log_time +] +log_time:example_MyPoints:colors:start: Timestamp(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:colors + rerun:component_type:example.MyColor + rerun:index:log_time +] +log_time:example_MyPoints:points:end: Timestamp(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:points + rerun:component_type:example.MyPoint + rerun:index:log_time +] +log_time:example_MyPoints:points:start: Timestamp(ns) [ + rerun:archetype:example.MyPoints + rerun:component:example.MyPoints:points + rerun:component_type:example.MyPoint + rerun:index:log_time +] +log_time:start: Timestamp(ns) [ + rerun:index:log_time +] diff --git a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch.snap b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch.snap index 8355466e28a9..37f79a7f9d56 100644 --- a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch.snap +++ b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch.snap @@ -2,56 +2,56 @@ source: crates/store/re_log_encoding/tests/footers_and_manifests.rs expression: rrd_manifest_batch.format_snapshot(true) --- -┌─────────────────────────────────────────┬──────────────────────────────────┬──────────────────────────────────┐ -│ chunk_entity_path ┆ /my_entity ┆ /my_entity │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ chunk_id ┆ 00000000000000010000000000000001 ┆ 00000000000000010000000000000002 │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ chunk_is_static ┆ false ┆ true │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ chunk_byte_offset ┆ 0 ┆ 962 │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ chunk_byte_size ┆ 962 ┆ 243 │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ example_MyPoints:colors:has_static_data ┆ false ┆ false │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ example_MyPoints:labels:has_static_data ┆ false ┆ true │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ example_MyPoints:points:has_static_data ┆ false ┆ false │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ elapsed:start ┆ PT10S ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ elapsed:end ┆ PT40S ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ frame_nr:start ┆ 10 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ 
frame_nr:end ┆ 40 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ log_time:start ┆ 1970-01-01T00:00:00.000000010 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ log_time:end ┆ 1970-01-01T00:00:00.000000040 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ elapsed:example_MyPoints:colors:start ┆ PT20S ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ elapsed:example_MyPoints:colors:end ┆ PT30S ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ elapsed:example_MyPoints:points:start ┆ PT10S ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ elapsed:example_MyPoints:points:end ┆ PT40S ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ frame_nr:example_MyPoints:colors:start ┆ 20 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ frame_nr:example_MyPoints:colors:end ┆ 30 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ frame_nr:example_MyPoints:points:start ┆ 10 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ frame_nr:example_MyPoints:points:end ┆ 40 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ log_time:example_MyPoints:colors:start ┆ 1970-01-01T00:00:00.000000020 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ log_time:example_MyPoints:colors:end ┆ 1970-01-01T00:00:00.000000030 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ log_time:example_MyPoints:points:start ┆ 1970-01-01T00:00:00.000000010 ┆ null │ -├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ -│ log_time:example_MyPoints:points:end ┆ 1970-01-01T00:00:00.000000040 ┆ null │ -└─────────────────────────────────────────┴──────────────────────────────────┴──────────────────────────────────┘ +┌────────────────────────────────────────────┬──────────────────────────────────┬──────────────────────────────────┐ +│ chunk_entity_path ┆ /my_entity ┆ /my_entity │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_id ┆ 00000000000000010000000000000001 ┆ 00000000000000010000000000000002 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_is_static ┆ false ┆ true │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_byte_offset ┆ 0 ┆ 962 │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ chunk_byte_size ┆ 962 ┆ 243 │ 
+├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ example_MyPoints:colors:has_static_data ┆ false ┆ false │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ example_MyPoints:labels:has_static_data ┆ false ┆ true │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ example_MyPoints:points:has_static_data ┆ false ┆ false │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:start ┆ PT10S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:end ┆ PT40S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:start ┆ 10 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:end ┆ 40 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:start ┆ 1970-01-01T00:00:00.000000010 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:end ┆ 1970-01-01T00:00:00.000000040 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:colors:start ┆ PT20S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:colors:end ┆ PT30S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:points:start ┆ PT10S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ elapsed_time:example_MyPoints:points:end ┆ PT40S ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:colors:start ┆ 20 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:colors:end ┆ 30 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:points:start ┆ 10 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ frame_nr:example_MyPoints:points:end ┆ 40 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:example_MyPoints:colors:start ┆ 1970-01-01T00:00:00.000000020 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:example_MyPoints:colors:end ┆ 1970-01-01T00:00:00.000000030 ┆ null │ +├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:example_MyPoints:points:start ┆ 1970-01-01T00:00:00.000000010 ┆ null │ 
+├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ +│ log_time:example_MyPoints:points:end ┆ 1970-01-01T00:00:00.000000040 ┆ null │ +└────────────────────────────────────────────┴──────────────────────────────────┴──────────────────────────────────┘ diff --git a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch_schema.snap b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch_schema.snap index f05ec42a320b..d743e15817b5 100644 --- a/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch_schema.snap +++ b/crates/store/re_log_encoding/tests/snapshots/footers_and_manifests__simple_manifest_batch_schema.snap @@ -7,35 +7,35 @@ chunk_byte_size: u64 chunk_entity_path: Utf8 chunk_id: FixedSizeBinary[16] chunk_is_static: bool -elapsed:end: Duration(ns) [ - rerun:index:elapsed +elapsed_time:end: Duration(ns) [ + rerun:index:elapsed time ] -elapsed:example_MyPoints:colors:end: Duration(ns) [ +elapsed_time:example_MyPoints:colors:end: Duration(ns) [ rerun:archetype:example.MyPoints rerun:component:example.MyPoints:colors rerun:component_type:example.MyColor - rerun:index:elapsed + rerun:index:elapsed time ] -elapsed:example_MyPoints:colors:start: Duration(ns) [ +elapsed_time:example_MyPoints:colors:start: Duration(ns) [ rerun:archetype:example.MyPoints rerun:component:example.MyPoints:colors rerun:component_type:example.MyColor - rerun:index:elapsed + rerun:index:elapsed time ] -elapsed:example_MyPoints:points:end: Duration(ns) [ +elapsed_time:example_MyPoints:points:end: Duration(ns) [ rerun:archetype:example.MyPoints rerun:component:example.MyPoints:points rerun:component_type:example.MyPoint - rerun:index:elapsed + rerun:index:elapsed time ] -elapsed:example_MyPoints:points:start: Duration(ns) [ +elapsed_time:example_MyPoints:points:start: Duration(ns) [ rerun:archetype:example.MyPoints rerun:component:example.MyPoints:points rerun:component_type:example.MyPoint - rerun:index:elapsed + rerun:index:elapsed time ] -elapsed:start: Duration(ns) [ - rerun:index:elapsed +elapsed_time:start: Duration(ns) [ + rerun:index:elapsed time ] example_MyPoints:colors:has_static_data: bool [ rerun:archetype:example.MyPoints diff --git a/crates/top/rerun/src/commands/rrd/filter.rs b/crates/top/rerun/src/commands/rrd/filter.rs index 9cddaa671bb8..b4927523cfc8 100644 --- a/crates/top/rerun/src/commands/rrd/filter.rs +++ b/crates/top/rerun/src/commands/rrd/filter.rs @@ -176,7 +176,7 @@ impl FilterCommand { .join() .map_err(|err| anyhow::anyhow!("Unknown error: {err:?}"))??; // NOLINT: there is no `Display` for this `err` - let rrds_in_size = rx_size_bytes.recv().ok(); + let rrds_in_size = rx_size_bytes.recv().ok().map(|(size, _footers)| size); let size_reduction = if let (Some(rrds_in_size), rrd_out_size) = (rrds_in_size, rrd_out_size) { format!( diff --git a/crates/top/rerun/src/commands/rrd/merge_compact.rs b/crates/top/rerun/src/commands/rrd/merge_compact.rs index 932e1eecb28c..90d333240fe3 100644 --- a/crates/top/rerun/src/commands/rrd/merge_compact.rs +++ b/crates/top/rerun/src/commands/rrd/merge_compact.rs @@ -321,7 +321,7 @@ fn merge_and_compact( rrd_out.flush().context("couldn't flush output")?; - let rrds_in_size = rx_size_bytes.recv().ok(); + let rrds_in_size = rx_size_bytes.recv().ok().map(|(size, _footers)| size); let num_chunks_reduction = format!( "-{:3.3}%", 100.0 - num_chunks_after as f64 / (num_chunks_before as f64 + 
f64::EPSILON) * 100.0 diff --git a/crates/top/rerun/src/commands/rrd/print.rs b/crates/top/rerun/src/commands/rrd/print.rs index 70656a12c60d..dda57af44bf2 100644 --- a/crates/top/rerun/src/commands/rrd/print.rs +++ b/crates/top/rerun/src/commands/rrd/print.rs @@ -2,6 +2,7 @@ use anyhow::Context as _; use arrow::array::RecordBatch; use itertools::Itertools as _; +use re_arrow_util::RecordBatchExt as _; use re_byte_size::SizeBytes as _; use re_log_types::{LogMsg, SetStoreInfo}; use re_sdk::EntityPath; @@ -43,13 +44,17 @@ pub struct PrintCommand { #[clap(long, default_missing_value="true", num_args=0..=1)] full_metadata: Option, - /// Transpose record batches before printing them? - #[clap(long, default_missing_value="true", num_args=0..=1)] - transposed: Option, - /// Show only chunks belonging to this entity. #[clap(long)] entity: Option, + + /// If true, displays all the parsed footers at the end. + #[clap(long, default_missing_value="true", num_args=0..=1)] + footers: Option, + + /// Transpose record batches before printing them? + #[clap(long, default_missing_value="true", num_args=0..=1)] + transposed: Option, } impl PrintCommand { @@ -60,15 +65,17 @@ impl PrintCommand { verbose, migrate, full_metadata, - transposed, entity, + footers, + transposed, } = self; let continue_on_error = continue_on_error.unwrap_or(true); let migrate = migrate.unwrap_or(true); - let transposed = transposed.unwrap_or(false); let full_metadata = full_metadata.unwrap_or(false); let entity = entity.map(|e| EntityPath::parse_forgiving(&e)); + let footers = footers.unwrap_or(false); + let transposed = transposed.unwrap_or(false); let options = Options { verbose, @@ -87,7 +94,7 @@ impl PrintCommand { ); } - let (rx, _) = read_rrd_streams_from_file_or_stdin(&path_to_input_rrds); + let (rx, rx_done) = read_rrd_streams_from_file_or_stdin(&path_to_input_rrds); for (_source, res) in rx { let mut is_success = true; @@ -113,6 +120,35 @@ impl PrintCommand { } } + if footers { + for (_, rrd_manifests) in rx_done { + for (source, mut rrd_manifest) in rrd_manifests? { + // Just to be nice: this will display the origin of the data in the header. + rrd_manifest + .data + .schema_metadata_mut() + .insert("rerun:source".to_owned(), source.to_string()); + + // Drop all per-entity and/or per-component columns to keep things readable. + // + // TODO(cmc): more config flags for columns to show etc. + let filtered = rrd_manifest + .data + .filter_columns_by(|f| f.name().starts_with("chunk_"))?; + + let formatted = re_arrow_util::format_record_batch_opts( + &filtered, + &re_arrow_util::RecordBatchFormatOpts { + max_cell_content_width: 32, + ..Default::default() + }, + ); + + println!("{formatted}"); + } + } + } + Ok(()) } } diff --git a/crates/top/rerun/src/commands/rrd/route.rs b/crates/top/rerun/src/commands/rrd/route.rs index 9dd6bf411f9c..eacf811d9911 100644 --- a/crates/top/rerun/src/commands/rrd/route.rs +++ b/crates/top/rerun/src/commands/rrd/route.rs @@ -35,6 +35,16 @@ pub struct RouteCommand { /// output. #[clap(long = "recording-id")] recording_id: Option, + + /// If set, this will compute an RRD footer with the appropriate manifest for the routed data. + /// + /// By default, `rerun rrd route` will always drop all existing RRD manifests when routing data, + /// as doing so invalidates their contents. 
+ /// This flag makes it possible to recompute an RRD manifest for the routed data, but beware + /// that it has to decode the data, which means it is A) much slower and B) will migrate + /// the data to the latest Sorbet specification automatically. + #[clap(long = "recompute-manifests", default_value_t = false)] + recompute_manifests: bool, } struct Rewrites { @@ -50,6 +60,7 @@ impl RouteCommand { continue_on_error, application_id, recording_id, + recompute_manifests, } = self; let rewrites = Rewrites { @@ -70,6 +81,7 @@ impl RouteCommand { if let Some(path) = path_to_output_rrd { let writer = BufWriter::new(File::create(path)?); process_messages( + *recompute_manifests, &rewrites, *continue_on_error, writer, @@ -81,6 +93,7 @@ impl RouteCommand { let lock = stdout.lock(); let writer = BufWriter::new(lock); process_messages( + *recompute_manifests, &rewrites, *continue_on_error, writer, @@ -93,7 +106,9 @@ impl RouteCommand { } } +#[expect(clippy::fn_params_excessive_bools)] fn process_messages( + recompute_manifests: bool, rewrites: &Rewrites, continue_on_error: bool, writer: W, @@ -105,6 +120,9 @@ fn process_messages( let mut num_unexpected_msgs = 0; let mut num_blueprint_activations = 0; + // Only used if recomputing manifests. + let mut app_id_injector = re_log_encoding::CachingApplicationIdInjector::default(); + // TODO(grtlr): encoding should match the original (just like in `rrd stats`). let options = re_log_encoding::rrd::EncodingOptions::PROTOBUF_COMPRESSED; let version = re_build_info::CrateVersion::LOCAL; @@ -173,11 +191,18 @@ fn process_messages( } } - // Safety: we're just forwarding an existing message, we didn't change its payload - // in any meaningful way. - #[expect(unsafe_code)] - unsafe { - encoder.append_transport(&msg)?; + if recompute_manifests { + use re_log_encoding::ToApplication as _; + let msg = msg.to_application((&mut app_id_injector, None))?; + encoder.append(&msg)?; + } else { + // Safety: we're just forwarding an existing message, we didn't change its payload + // in any meaningful way. + #[expect(unsafe_code)] + unsafe { + // Reminder: this will implicitly discard RRD footers. + encoder.append_transport(&msg)?; + } } } Err(err) => { diff --git a/crates/top/rerun/src/commands/stdio.rs b/crates/top/rerun/src/commands/stdio.rs index f50eb9974d81..ae4e2333034b 100644 --- a/crates/top/rerun/src/commands/stdio.rs +++ b/crates/top/rerun/src/commands/stdio.rs @@ -5,6 +5,7 @@ use crossbeam::channel; use itertools::Itertools as _; use re_chunk::external::crossbeam; +use re_log_encoding::RrdManifest; // --- @@ -30,17 +31,19 @@ impl std::fmt::Display for InputSource { /// * The first channel contains both the successfully decoded data, if any, as well as any /// errors faced during processing. /// * The second channel, which will fire only once, after all processing is done, indicates the -/// total number of bytes processed. +/// total number of bytes processed, and returns all RRD manifests that were parsed from footers +/// in the underlying stream. /// /// This function is best-effort: it will try to make progress even in the face of errors. /// It is up to the user to decide whether and when to stop. /// /// This function is capable of decoding multiple independent recordings from a single stream. 
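+/// A minimal consumption sketch (this mirrors how `rrd print` drains the two receivers in this
+/// change; the variable names are illustrative):
+///
+/// ```text
+/// let (rx_msgs, rx_metadata) = read_rrd_streams_from_file_or_stdin(&paths);
+/// for (source, res) in rx_msgs {
+///     let msg = res?; // a decoded `LogMsg`, or the error hit while decoding that input
+/// }
+/// // Fires once, after every input has been drained:
+/// if let Ok((num_bytes, rrd_manifests)) = rx_metadata.recv() {
+///     for (source, manifest) in rrd_manifests? { /* manifests parsed from footers, per input */ }
+/// }
+/// ```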
+#[expect(clippy::type_complexity)] // internal private API for the CLI impl pub fn read_rrd_streams_from_file_or_stdin( paths: &[String], ) -> ( channel::Receiver<(InputSource, anyhow::Result)>, - channel::Receiver, + channel::Receiver<(u64, anyhow::Result>)>, ) { read_any_rrd_streams_from_file_or_stdin::(paths) } @@ -55,7 +58,8 @@ pub fn read_rrd_streams_from_file_or_stdin( /// * The first channel contains both the successfully decoded data, if any, as well as any /// errors faced during processing. /// * The second channel, which will fire only once, after all processing is done, indicates the -/// total number of bytes processed. +/// total number of bytes processed, and returns all RRD manifests that were parsed from footers +/// in the underlying stream. /// /// This function is best-effort: it will try to make progress even in the face of errors. /// It is up to the user to decide whether and when to stop. @@ -68,6 +72,7 @@ pub fn read_rrd_streams_from_file_or_stdin( // TODO(ab): For pre-0.25 legacy data with `StoreId` missing their application id, the migration // in `Decoder` requires `SetStoreInfo` to arrive before the corresponding `ArrowMsg`. Ideally // this tool would cache orphan `ArrowMsg` until a matching `SetStoreInfo` arrives. +#[expect(clippy::type_complexity)] // internal private API for the CLI impl pub fn read_raw_rrd_streams_from_file_or_stdin( paths: &[String], ) -> ( @@ -75,18 +80,19 @@ pub fn read_raw_rrd_streams_from_file_or_stdin( InputSource, anyhow::Result, )>, - channel::Receiver, + channel::Receiver<(u64, anyhow::Result>)>, ) { read_any_rrd_streams_from_file_or_stdin::(paths) } +#[expect(clippy::type_complexity)] // internal private API for the CLI impl fn read_any_rrd_streams_from_file_or_stdin< T: re_log_encoding::DecoderEntrypoint + Send + 'static, >( paths: &[String], ) -> ( channel::Receiver<(InputSource, anyhow::Result)>, - channel::Receiver, + channel::Receiver<(u64, anyhow::Result>)>, ) { let path_to_input_rrds = paths .iter() @@ -95,26 +101,32 @@ fn read_any_rrd_streams_from_file_or_stdin< .collect_vec(); // TODO(cmc): might want to make this configurable at some point. 
- let (tx, rx) = crossbeam::channel::bounded(100); - let (tx_size_bytes, rx_size_bytes) = crossbeam::channel::bounded(1); + let (tx_msgs, rx_msgs) = crossbeam::channel::bounded(100); + let (tx_metadata, rx_metadata) = crossbeam::channel::bounded(1); _ = std::thread::Builder::new() .name("rerun-rrd-in".to_owned()) .spawn(move || { + let mut rrd_manifests = Ok(Vec::new()); let mut size_bytes = 0; if path_to_input_rrds.is_empty() { // stdin + let source = InputSource::Stdin; let stdin = std::io::BufReader::new(std::io::stdin().lock()); let mut decoder = re_log_encoding::Decoder::decode_lazy(stdin); for res in &mut decoder { let res = res.context("couldn't decode message from stdin -- skipping"); - tx.send((InputSource::Stdin, res)).ok(); + tx_msgs.send((source.clone(), res)).ok(); } size_bytes += decoder.num_bytes_processed(); + rrd_manifests = decoder + .rrd_manifests() + .context("couldn't decode footers") + .map(|manifests| manifests.into_iter().map(|m| (source.clone(), m)).collect()); } else { // file(s) @@ -124,28 +136,35 @@ fn read_any_rrd_streams_from_file_or_stdin< { Ok(file) => file, Err(err) => { - tx.send((InputSource::File(rrd_path.clone()), Err(err))) + tx_msgs + .send((InputSource::File(rrd_path.clone()), Err(err))) .ok(); continue; } }; + let source = InputSource::File(rrd_path.clone()); let rrd_file = std::io::BufReader::new(rrd_file); - let mut messages = re_log_encoding::Decoder::decode_lazy(rrd_file); - - for res in &mut messages { + let mut decoder = re_log_encoding::Decoder::decode_lazy(rrd_file); + for res in &mut decoder { let res = res.context("decode rrd message").with_context(|| { format!("couldn't decode message {rrd_path:?} -- skipping") }); - tx.send((InputSource::File(rrd_path.clone()), res)).ok(); + tx_msgs.send((source.clone(), res)).ok(); } - size_bytes += messages.num_bytes_processed(); + size_bytes += decoder.num_bytes_processed(); + rrd_manifests = decoder + .rrd_manifests() + .context("couldn't decode footers") + .map(|manifests| { + manifests.into_iter().map(|m| (source.clone(), m)).collect() + }); } } - tx_size_bytes.send(size_bytes).ok(); + tx_metadata.send((size_bytes, rrd_manifests)).ok(); }); - (rx, rx_size_bytes) + (rx_msgs, rx_metadata) } diff --git a/docs/content/reference/cli.md b/docs/content/reference/cli.md index 649877b4bf1a..6011cd663aac 100644 --- a/docs/content/reference/cli.md +++ b/docs/content/reference/cli.md @@ -537,12 +537,15 @@ Example: `rerun rrd print /my/recordings/*.rrd` * `--full-metadata ` > If true, includes `rerun.` prefixes on keys. -* `--transposed ` -> Transpose record batches before printing them? - * `--entity ` > Show only chunks belonging to this entity. +* `--footers ` +> If true, displays all the parsed footers at the end. + +* `--transposed ` +> Transpose record batches before printing them? + ## rerun rrd route Manipulates the metadata of log message streams without decoding the payloads. @@ -576,6 +579,13 @@ Note: Because the payload of the messages is never decoded, no migration or veri > > When this flag is set and multiple input .rdd files are specified, blueprint activation commands will be dropped from the resulting output. +* `--recompute-manifests ` +> If set, this will compute an RRD footer with the appropriate manifest for the routed data. +> +> By default, `rerun rrd route` will always drop all existing RRD manifests when routing data, as doing so invalidates their contents. 
This flag makes it possible to recompute an RRD manifest for the routed data, but beware that it has to decode the data, which means it A) is much slower and B) will migrate the data to the latest Sorbet specification automatically. > > [Default: `false`] ## rerun rrd stats Compute important statistics for one or more .rrd/.rbl files/streams.