diff --git a/crates/iceberg/src/catalog/memory/catalog.rs b/crates/iceberg/src/catalog/memory/catalog.rs
index d1d361c7a1..12d18b9f36 100644
--- a/crates/iceberg/src/catalog/memory/catalog.rs
+++ b/crates/iceberg/src/catalog/memory/catalog.rs
@@ -20,17 +20,16 @@
 use std::collections::HashMap;

 use async_trait::async_trait;
-use futures::lock::Mutex;
+use futures::lock::{Mutex, MutexGuard};
 use itertools::Itertools;
-use uuid::Uuid;

 use super::namespace_state::NamespaceState;
 use crate::io::FileIO;
 use crate::spec::{TableMetadata, TableMetadataBuilder};
 use crate::table::Table;
 use crate::{
-    Catalog, Error, ErrorKind, Namespace, NamespaceIdent, Result, TableCommit, TableCreation,
-    TableIdent,
+    Catalog, Error, ErrorKind, MetadataLocation, Namespace, NamespaceIdent, Result, TableCommit,
+    TableCreation, TableIdent,
 };

 /// namespace `location` property
@@ -45,7 +44,7 @@ pub struct MemoryCatalog {
 }

 impl MemoryCatalog {
-    /// Creates an memory catalog.
+    /// Creates a memory catalog.
     pub fn new(file_io: FileIO, warehouse_location: Option<String>) -> Self {
         Self {
             root_namespace_state: Mutex::new(NamespaceState::default()),
@@ -53,6 +52,23 @@ impl MemoryCatalog {
             warehouse_location,
         }
     }
+
+    /// Loads a table from the locked namespace state.
+    async fn load_table_from_locked_state(
+        &self,
+        table_ident: &TableIdent,
+        root_namespace_state: &MutexGuard<'_, NamespaceState>,
+    ) -> Result<Table> {
+        let metadata_location = root_namespace_state.get_existing_table_location(table_ident)?;
+        let metadata = TableMetadata::read_from(&self.file_io, metadata_location).await?;
+
+        Table::builder()
+            .identifier(table_ident.clone())
+            .metadata(metadata)
+            .metadata_location(metadata_location.to_string())
+            .file_io(self.file_io.clone())
+            .build()
+    }
 }

 #[async_trait]
@@ -203,12 +219,7 @@ impl Catalog for MemoryCatalog {
         let metadata = TableMetadataBuilder::from_table_creation(table_creation)?
             .build()?
             .metadata;
-        let metadata_location = format!(
-            "{}/metadata/{}-{}.metadata.json",
-            &location,
-            0,
-            Uuid::new_v4()
-        );
+        let metadata_location = MetadataLocation::new_with_table_location(location).to_string();

         metadata.write_to(&self.file_io, &metadata_location).await?;

@@ -226,15 +237,8 @@ impl Catalog for MemoryCatalog {
     async fn load_table(&self, table_ident: &TableIdent) -> Result<Table> {
         let root_namespace_state = self.root_namespace_state.lock().await;

-        let metadata_location = root_namespace_state.get_existing_table_location(table_ident)?;
-        let metadata = TableMetadata::read_from(&self.file_io, metadata_location).await?;
-
-        Table::builder()
-            .file_io(self.file_io.clone())
-            .metadata_location(metadata_location.clone())
-            .metadata(metadata)
-            .identifier(table_ident.clone())
-            .build()
+        self.load_table_from_locked_state(table_ident, &root_namespace_state)
+            .await
     }

     /// Drop a table from the catalog.
@@ -289,12 +293,30 @@ impl Catalog for MemoryCatalog {
             .build()
     }

-    /// Update a table to the catalog.
-    async fn update_table(&self, _commit: TableCommit) -> Result<Table> {
-        Err(Error::new(
-            ErrorKind::FeatureUnsupported,
-            "MemoryCatalog does not currently support updating tables.",
-        ))
+    /// Update a table in the catalog.
+    async fn update_table(&self, commit: TableCommit) -> Result<Table> {
+        let mut root_namespace_state = self.root_namespace_state.lock().await;
+
+        let current_table = self
+            .load_table_from_locked_state(commit.identifier(), &root_namespace_state)
+            .await?;
+
+        // Apply TableCommit to get staged table
+        let staged_table = commit.apply(current_table)?;
+
+        // Write table metadata to the new location
+        staged_table
+            .metadata()
+            .write_to(
+                staged_table.file_io(),
+                staged_table.metadata_location_result()?,
+            )
+            .await?;
+
+        // Flip the pointer to reference the new metadata file.
+        let updated_table = root_namespace_state.commit_table_update(staged_table)?;
+
+        Ok(updated_table)
     }
 }

@@ -303,6 +325,7 @@ mod tests {
     use std::collections::HashSet;
     use std::hash::Hash;
     use std::iter::FromIterator;
+    use std::vec;

     use regex::Regex;
     use tempfile::TempDir;
@@ -310,6 +333,7 @@ mod tests {
     use super::*;
     use crate::io::FileIOBuilder;
     use crate::spec::{NestedField, PartitionSpec, PrimitiveType, Schema, SortOrder, Type};
+    use crate::transaction::{ApplyTransactionAction, Transaction};

     fn temp_path() -> String {
         let temp_dir = TempDir::new().unwrap();
@@ -335,7 +359,7 @@ mod tests {
         }
     }

-    fn to_set<T: Eq + Hash>(vec: Vec<T>) -> HashSet<T> {
+    fn to_set<T: Eq + Hash>(vec: Vec<T>) -> HashSet<T> {
         HashSet::from_iter(vec)
     }

@@ -348,8 +372,8 @@ mod tests {
             .unwrap()
     }

-    async fn create_table<C: Catalog>(catalog: &C, table_ident: &TableIdent) {
-        let _ = catalog
+    async fn create_table<C: Catalog>(catalog: &C, table_ident: &TableIdent) -> Table {
+        catalog
             .create_table(
                 &table_ident.namespace,
                 TableCreation::builder()
                     .name(table_ident.name().clone())
                     .schema(simple_table_schema())
                     .build(),
             )
             .await
-            .unwrap();
+            .unwrap()
     }

     async fn create_tables<C: Catalog>(catalog: &C, table_idents: Vec<&TableIdent>) {
@@ -367,6 +391,14 @@ mod tests {
         }
     }

+    async fn create_table_with_namespace<C: Catalog>(catalog: &C) -> Table {
+        let namespace_ident = NamespaceIdent::new("abc".into());
+        create_namespace(catalog, &namespace_ident).await;
+
+        let table_ident = TableIdent::new(namespace_ident, "test".to_string());
+        create_table(catalog, &table_ident).await
+    }
+
     fn assert_table_eq(table: &Table, expected_table_ident: &TableIdent, expected_schema: &Schema) {
         assert_eq!(table.identifier(), expected_table_ident);
@@ -411,7 +443,12 @@ mod tests {
     fn assert_table_metadata_location_matches(table: &Table, regex_str: &str) {
         let actual = table.metadata_location().unwrap().to_string();
         let regex = Regex::new(regex_str).unwrap();
-        assert!(regex.is_match(&actual))
+        assert!(
+            regex.is_match(&actual),
+            "Expected metadata location to match regex, but got location: {} and regex: {}",
+            actual,
+            regex
+        )
     }

     #[tokio::test]
@@ -1063,7 +1100,7 @@ mod tests {
         let table_name = "tbl1";
         let expected_table_ident = TableIdent::new(namespace_ident.clone(), table_name.into());
         let expected_table_metadata_location_regex = format!(
-            "^{}/tbl1/metadata/0-{}.metadata.json$",
+            "^{}/tbl1/metadata/00000-{}.metadata.json$",
             namespace_location, UUID_REGEX_STR,
         );

@@ -1116,7 +1153,7 @@ mod tests {
         let expected_table_ident =
             TableIdent::new(nested_namespace_ident.clone(), table_name.into());
         let expected_table_metadata_location_regex = format!(
-            "^{}/tbl1/metadata/0-{}.metadata.json$",
+            "^{}/tbl1/metadata/00000-{}.metadata.json$",
             nested_namespace_location, UUID_REGEX_STR,
         );

@@ -1157,7 +1194,7 @@ mod tests {
         let table_name = "tbl1";
         let expected_table_ident = TableIdent::new(namespace_ident.clone(), table_name.into());
         let expected_table_metadata_location_regex = format!(
-            "^{}/a/tbl1/metadata/0-{}.metadata.json$",
+            "^{}/a/tbl1/metadata/00000-{}.metadata.json$",
             warehouse_location, UUID_REGEX_STR
         );

@@ -1205,7 +1242,7 @@ mod tests {
         let expected_table_ident =
             TableIdent::new(nested_namespace_ident.clone(), table_name.into());
         let expected_table_metadata_location_regex = format!(
-            "^{}/a/b/tbl1/metadata/0-{}.metadata.json$",
+            "^{}/a/b/tbl1/metadata/00000-{}.metadata.json$",
             warehouse_location, UUID_REGEX_STR
         );

@@ -1705,7 +1742,7 @@ mod tests {
                 .unwrap_err()
                 .to_string(),
             format!(
-                "TableAlreadyExists => Cannot create table {:? }. Table already exists.",
+                "TableAlreadyExists => Cannot create table {:?}. Table already exists.",
                 &dst_table_ident
             ),
         );
@@ -1754,4 +1791,87 @@ mod tests {
             metadata_location
         );
     }
+
+    #[tokio::test]
+    async fn test_update_table() {
+        let catalog = new_memory_catalog();
+
+        let table = create_table_with_namespace(&catalog).await;
+
+        // Assert the table doesn't contain the update yet
+        assert!(!table.metadata().properties().contains_key("key"));
+
+        // Update table metadata
+        let tx = Transaction::new(&table);
+        let updated_table = tx
+            .update_table_properties()
+            .set("key".to_string(), "value".to_string())
+            .apply(tx)
+            .unwrap()
+            .commit(&catalog)
+            .await
+            .unwrap();
+
+        assert_eq!(
+            updated_table.metadata().properties().get("key").unwrap(),
+            "value"
+        );
+
+        assert_eq!(table.identifier(), updated_table.identifier());
+        assert_eq!(table.metadata().uuid(), updated_table.metadata().uuid());
+        assert!(table.metadata().last_updated_ms() < updated_table.metadata().last_updated_ms());
+        assert_ne!(table.metadata_location(), updated_table.metadata_location());
+
+        assert!(
+            table.metadata().metadata_log().len() < updated_table.metadata().metadata_log().len()
+        );
+    }
+
+    #[tokio::test]
+    async fn test_update_table_fails_if_table_doesnt_exist() {
+        let catalog = new_memory_catalog();
+
+        let namespace_ident = NamespaceIdent::new("a".into());
+        create_namespace(&catalog, &namespace_ident).await;
+
+        // This table is not known to the catalog.
+        let table_ident = TableIdent::new(namespace_ident, "test".to_string());
+        let table = build_table(table_ident);
+
+        let tx = Transaction::new(&table);
+        let err = tx
+            .update_table_properties()
+            .set("key".to_string(), "value".to_string())
+            .apply(tx)
+            .unwrap()
+            .commit(&catalog)
+            .await
+            .unwrap_err();
+        assert_eq!(err.kind(), ErrorKind::TableNotFound);
+    }
+
+    fn build_table(ident: TableIdent) -> Table {
+        let file_io = FileIOBuilder::new_fs_io().build().unwrap();
+
+        let temp_dir = TempDir::new().unwrap();
+        let location = temp_dir.path().to_str().unwrap().to_string();
+
+        let table_creation = TableCreation::builder()
+            .name(ident.name().to_string())
+            .schema(simple_table_schema())
+            .location(location)
+            .build();
+        let metadata = TableMetadataBuilder::from_table_creation(table_creation)
+            .unwrap()
+            .build()
+            .unwrap()
+            .metadata;
+
+        Table::builder()
+            .identifier(ident)
+            .metadata(metadata)
+            .file_io(file_io)
+            .build()
+            .unwrap()
+    }
 }
diff --git a/crates/iceberg/src/catalog/memory/namespace_state.rs b/crates/iceberg/src/catalog/memory/namespace_state.rs
index 2ab00e710a..2fc481b767 100644
--- a/crates/iceberg/src/catalog/memory/namespace_state.rs
+++ b/crates/iceberg/src/catalog/memory/namespace_state.rs
@@ -19,6 +19,7 @@
 use std::collections::{HashMap, hash_map};

 use itertools::Itertools;

+use crate::table::Table;
 use crate::{Error, ErrorKind, NamespaceIdent, Result, TableIdent};

 // Represents the state of a namespace
@@ -259,7 +260,7 @@ impl NamespaceState {

         match namespace.table_metadata_locations.get(table_ident.name()) {
             None => no_such_table_err(table_ident),
-            Some(table_metadadata_location) => Ok(table_metadadata_location),
+            Some(table_metadata_location) => Ok(table_metadata_location),
         }
     }

@@ -296,4 +297,22 @@ impl NamespaceState {
             Some(metadata_location) => Ok(metadata_location),
         }
     }
+
+    /// Updates the metadata location of the given table or returns an error if it doesn't exist
+    pub(crate) fn commit_table_update(&mut self, staged_table: Table) -> Result<Table> {
+        let namespace = self.get_mut_namespace(staged_table.identifier().namespace())?;
+
+        let _ = namespace
+            .table_metadata_locations
+            .insert(
+                staged_table.identifier().name().to_string(),
+                staged_table.metadata_location_result()?.to_string(),
+            )
+            .ok_or(Error::new(
+                ErrorKind::TableNotFound,
+                format!("No such table: {:?}", staged_table.identifier()),
+            ))?;
+
+        Ok(staged_table)
+    }
 }
diff --git a/crates/iceberg/src/catalog/metadata_location.rs b/crates/iceberg/src/catalog/metadata_location.rs
new file mode 100644
index 0000000000..8cb5cb11d2
--- /dev/null
+++ b/crates/iceberg/src/catalog/metadata_location.rs
@@ -0,0 +1,239 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::fmt::Display;
+use std::str::FromStr;
+
+use uuid::Uuid;
+
+use crate::{Error, ErrorKind, Result};
+
+/// Helper for parsing a location of the format: `<table_location>/metadata/<version>-<uuid>.metadata.json`
+#[derive(Clone, Debug, PartialEq)]
+pub struct MetadataLocation {
+    table_location: String,
+    version: i32,
+    id: Uuid,
+}
+
+impl MetadataLocation {
+    /// Creates a completely new metadata location starting at version 0.
+    /// Only used for creating a new table. For updates, see `with_next_version`.
+    pub fn new_with_table_location(table_location: impl ToString) -> Self {
+        Self {
+            table_location: table_location.to_string(),
+            version: 0,
+            id: Uuid::new_v4(),
+        }
+    }
+
+    /// Creates a new metadata location for an updated metadata file.
+    pub fn with_next_version(&self) -> Self {
+        Self {
+            table_location: self.table_location.clone(),
+            version: self.version + 1,
+            id: Uuid::new_v4(),
+        }
+    }
+
+    fn parse_metadata_path_prefix(path: &str) -> Result<String> {
+        let prefix = path.strip_suffix("/metadata").ok_or(Error::new(
+            ErrorKind::Unexpected,
+            format!(
+                "Metadata location not under \"/metadata\" subdirectory: {}",
+                path
+            ),
+        ))?;
+
+        Ok(prefix.to_string())
+    }
+
+    /// Parses a file name of the format `<version>-<uuid>.metadata.json`.
+    fn parse_file_name(file_name: &str) -> Result<(i32, Uuid)> {
+        let (version, id) = file_name
+            .strip_suffix(".metadata.json")
+            .ok_or(Error::new(
+                ErrorKind::Unexpected,
+                format!("Invalid metadata file ending: {}", file_name),
+            ))?
+            .split_once('-')
+            .ok_or(Error::new(
+                ErrorKind::Unexpected,
+                format!("Invalid metadata file name format: {}", file_name),
+            ))?;
+
+        Ok((version.parse::<i32>()?, Uuid::parse_str(id)?))
+    }
+}
+
+impl Display for MetadataLocation {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}/metadata/{:0>5}-{}.metadata.json",
+            self.table_location, self.version, self.id
+        )
+    }
+}
+
+impl FromStr for MetadataLocation {
+    type Err = Error;
+
+    fn from_str(s: &str) -> Result<Self> {
+        let (path, file_name) = s.rsplit_once('/').ok_or(Error::new(
+            ErrorKind::Unexpected,
+            format!("Invalid metadata location: {}", s),
+        ))?;
+
+        let prefix = Self::parse_metadata_path_prefix(path)?;
+        let (version, id) = Self::parse_file_name(file_name)?;
+
+        Ok(MetadataLocation {
+            table_location: prefix,
+            version,
+            id,
+        })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::str::FromStr;
+
+    use uuid::Uuid;
+
+    use crate::MetadataLocation;
+
+    #[test]
+    fn test_metadata_location_from_string() {
+        let test_cases = vec![
+            // No prefix
+            (
+                "/metadata/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Ok(MetadataLocation {
+                    table_location: "".to_string(),
+                    version: 1234567,
+                    id: Uuid::from_str("2cd22b57-5127-4198-92ba-e4e67c79821b").unwrap(),
+                }),
+            ),
+            // Some prefix
+            (
+                "/abc/metadata/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Ok(MetadataLocation {
+                    table_location: "/abc".to_string(),
+                    version: 1234567,
+                    id: Uuid::from_str("2cd22b57-5127-4198-92ba-e4e67c79821b").unwrap(),
+                }),
+            ),
+            // Longer prefix
+            (
+                "/abc/def/metadata/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Ok(MetadataLocation {
+                    table_location: "/abc/def".to_string(),
+                    version: 1234567,
+                    id: Uuid::from_str("2cd22b57-5127-4198-92ba-e4e67c79821b").unwrap(),
+                }),
+            ),
+            // Prefix with special characters
+            (
+                "https://127.0.0.1/metadata/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Ok(MetadataLocation {
+                    table_location: "https://127.0.0.1".to_string(),
+                    version: 1234567,
+                    id: Uuid::from_str("2cd22b57-5127-4198-92ba-e4e67c79821b").unwrap(),
+                }),
+            ),
+            // Another id
+            (
+                "/abc/metadata/1234567-81056704-ce5b-41c4-bb83-eb6408081af6.metadata.json",
+                Ok(MetadataLocation {
+                    table_location: "/abc".to_string(),
+                    version: 1234567,
+                    id: Uuid::from_str("81056704-ce5b-41c4-bb83-eb6408081af6").unwrap(),
+                }),
+            ),
+            // Version 0
+            (
+                "/abc/metadata/00000-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Ok(MetadataLocation {
+                    table_location: "/abc".to_string(),
+                    version: 0,
+                    id: Uuid::from_str("2cd22b57-5127-4198-92ba-e4e67c79821b").unwrap(),
+                }),
+            ),
+            // Negative version
+            (
+                "/metadata/-123-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Err("".to_string()),
+            ),
+            // Invalid uuid
+            (
+                "/metadata/1234567-no-valid-id.metadata.json",
+                Err("".to_string()),
+            ),
+            // Non-numeric version
+            (
+                "/metadata/noversion-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Err("".to_string()),
+            ),
+            // No /metadata subdirectory
+            (
+                "/wrongsubdir/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+                Err("".to_string()),
+            ),
+            // No .metadata.json suffix
+            (
+                "/metadata/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata",
+                Err("".to_string()),
+            ),
+            (
+                "/metadata/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.wrong.file",
+                Err("".to_string()),
+            ),
+        ];
+
+        for (input, expected) in test_cases {
+            match MetadataLocation::from_str(input) {
+                Ok(metadata_location) => {
+                    assert!(expected.is_ok());
+                    assert_eq!(metadata_location, expected.unwrap());
+                }
+                Err(_) => assert!(expected.is_err()),
+            }
+        }
+    }
+
+    #[test]
+    fn test_metadata_location_with_next_version() {
+        let test_cases = vec![
+            MetadataLocation::new_with_table_location("/abc"),
+            MetadataLocation::from_str(
+                "/abc/def/metadata/1234567-2cd22b57-5127-4198-92ba-e4e67c79821b.metadata.json",
+            )
+            .unwrap(),
+        ];
+
+        for input in test_cases {
+            let next = MetadataLocation::from_str(&input.to_string())
+                .unwrap()
+                .with_next_version();
+            assert_eq!(next.table_location, input.table_location);
+            assert_eq!(next.version, input.version + 1);
+            assert_ne!(next.id, input.id);
+        }
+    }
+}
diff --git a/crates/iceberg/src/catalog/mod.rs b/crates/iceberg/src/catalog/mod.rs
index 73c6c10b74..a468edc475 100644
--- a/crates/iceberg/src/catalog/mod.rs
+++ b/crates/iceberg/src/catalog/mod.rs
@@ -18,17 +18,20 @@
 //! Catalog API for Apache Iceberg

 pub mod memory;
+mod metadata_location;

 use std::collections::HashMap;
 use std::fmt::{Debug, Display};
 use std::future::Future;
 use std::mem::take;
 use std::ops::Deref;
+use std::str::FromStr;
 use std::sync::Arc;

 use _serde::deserialize_snapshot;
 use async_trait::async_trait;
 pub use memory::MemoryCatalog;
+pub use metadata_location::*;
 #[cfg(test)]
 use mockall::automock;
 use serde_derive::{Deserialize, Serialize};
@@ -335,14 +338,26 @@ impl TableCommit {
             requirement.check(Some(table.metadata()))?;
         }

-        // apply updates to metadata builder
-        let mut metadata_builder = table.metadata().clone().into_builder(None);
+        // get current metadata location
+        let current_metadata_location = table.metadata_location_result()?;

+        // apply updates to metadata builder
+        let mut metadata_builder = table
+            .metadata()
+            .clone()
+            .into_builder(Some(current_metadata_location.to_string()));
         for update in self.updates {
             metadata_builder = update.apply(metadata_builder)?;
         }

-        Ok(table.with_metadata(Arc::new(metadata_builder.build()?.metadata)))
+        // Bump the version of metadata
+        let new_metadata_location = MetadataLocation::from_str(current_metadata_location)?
+            .with_next_version()
+            .to_string();
+
+        Ok(table
+            .with_metadata(Arc::new(metadata_builder.build()?.metadata))
+            .with_metadata_location(new_metadata_location))
     }
 }

@@ -2172,7 +2187,7 @@ mod tests {

         Table::builder()
             .metadata(resp)
-            .metadata_location("s3://bucket/test/location/metadata/v2.json".to_string())
+            .metadata_location("s3://bucket/test/location/metadata/00000-8a62c37d-4573-4021-952a-c0baef7d21d0.metadata.json".to_string())
             .identifier(TableIdent::from_strs(["ns1", "test1"]).unwrap())
             .file_io(FileIOBuilder::new("memory").build().unwrap())
             .build()
@@ -2181,7 +2196,7 @@ mod tests {

         let updates = vec![
             TableUpdate::SetLocation {
-                location: "s3://bucket/test/new_location/metadata/v2.json".to_string(),
+                location: "s3://bucket/test/new_location/data".to_string(),
             },
             TableUpdate::SetProperties {
                 updates: vec![
@@ -2214,9 +2229,17 @@ mod tests {
             "v2"
         );

+        // metadata version should be bumped
+        assert!(
+            updated_table
+                .metadata_location()
+                .unwrap()
+                .starts_with("s3://bucket/test/location/metadata/00001-")
+        );
+
         assert_eq!(
             updated_table.metadata().location,
-            "s3://bucket/test/new_location/metadata/v2.json".to_string()
-        )
+            "s3://bucket/test/new_location/data",
+        );
     }
 }
diff --git a/crates/iceberg/src/error.rs b/crates/iceberg/src/error.rs
index 2781cf6788..7ae01f1b51 100644
--- a/crates/iceberg/src/error.rs
+++ b/crates/iceberg/src/error.rs
@@ -348,6 +348,12 @@ define_from_err!(
     "handling invalid utf-8 characters"
 );

+define_from_err!(
+    core::num::ParseIntError,
+    ErrorKind::Unexpected,
+    "parsing integer from string"
+);
+
 define_from_err!(
     std::array::TryFromSliceError,
     ErrorKind::DataInvalid,
diff --git a/crates/iceberg/src/spec/table_metadata.rs b/crates/iceberg/src/spec/table_metadata.rs
index 0f0854f7fc..3b89f54674 100644
--- a/crates/iceberg/src/spec/table_metadata.rs
+++ b/crates/iceberg/src/spec/table_metadata.rs
@@ -177,7 +177,7 @@ pub struct TableMetadata {
     /// that encodes changes to the previous metadata files for the table.
     /// Each time a new metadata file is created, a new entry of the
     /// previous metadata file location should be added to the list.
-    /// Tables can be configured to remove oldest metadata log entries and
+    /// Tables can be configured to remove the oldest metadata log entries and
     /// keep a fixed-size log of the most recent entries after a commit.
     pub(crate) metadata_log: Vec<MetadataLog>,

@@ -3078,7 +3078,7 @@ mod tests {
     }

     #[tokio::test]
-    async fn test_table_metadata_io_read_write() {
+    async fn test_table_metadata_read_write() {
         // Create a temporary directory for our test
         let temp_dir = TempDir::new().unwrap();
         let temp_path = temp_dir.path().to_str().unwrap();
@@ -3111,7 +3111,7 @@ mod tests {
     }

     #[tokio::test]
-    async fn test_table_metadata_io_read_nonexistent_file() {
+    async fn test_table_metadata_read_nonexistent_file() {
         // Create a FileIO instance
         let file_io = FileIOBuilder::new_fs_io().build().unwrap();
diff --git a/crates/iceberg/src/table.rs b/crates/iceberg/src/table.rs
index 7534143c26..d4e696ce84 100644
--- a/crates/iceberg/src/table.rs
+++ b/crates/iceberg/src/table.rs
@@ -168,6 +168,12 @@ impl Table {
         self
     }

+    /// Sets the [`Table`] metadata location and returns an updated instance.
+    pub(crate) fn with_metadata_location(mut self, metadata_location: String) -> Self {
+        self.metadata_location = Some(metadata_location);
+        self
+    }
+
     /// Returns a TableBuilder to build a table
     pub fn builder() -> TableBuilder {
         TableBuilder::new()
@@ -192,6 +198,17 @@ impl Table {
         self.metadata_location.as_deref()
     }

+    /// Returns current metadata location in a result.
+    pub fn metadata_location_result(&self) -> Result<&str> {
+        self.metadata_location.as_deref().ok_or(Error::new(
+            ErrorKind::DataInvalid,
+            format!(
+                "Metadata location does not exist for table: {}",
+                self.identifier
+            ),
+        ))
+    }
+
     /// Returns file io used in this table.
     pub fn file_io(&self) -> &FileIO {
         &self.file_io
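
Usage sketch (illustrative, not part of the patch): the snippet below shows how the new MemoryCatalog::update_table path can be driven from client code, mirroring the test_update_table case added above. It assumes a table abc.test already exists (as the test helper creates), that MemoryCatalog and the catalog types are re-exported at the crate root, and the property key/value are placeholders.

use iceberg::transaction::{ApplyTransactionAction, Transaction};
use iceberg::{Catalog, MemoryCatalog, NamespaceIdent, Result, TableIdent};

async fn set_table_property(catalog: &MemoryCatalog) -> Result<()> {
    // Load the current table; the catalog resolves its metadata location internally.
    let ident = TableIdent::new(NamespaceIdent::new("abc".into()), "test".to_string());
    let table = catalog.load_table(&ident).await?;

    // Stage a property update and commit it. commit() drives Catalog::update_table,
    // which writes a new <table_location>/metadata/<version>-<uuid>.metadata.json file
    // and then flips the pointer via NamespaceState::commit_table_update.
    let tx = Transaction::new(&table);
    let updated = tx
        .update_table_properties()
        .set("owner".to_string(), "me".to_string())
        .apply(tx)?
        .commit(catalog)
        .await?;

    // The commit produced a fresh metadata file, so the location must have changed.
    assert_ne!(table.metadata_location(), updated.metadata_location());
    Ok(())
}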
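
A second minimal sketch, assuming MetadataLocation is available at the crate root through the `pub use metadata_location::*;` added above: how metadata paths round-trip and get version-bumped, which is what TableCommit::apply now does before handing the staged table back to the catalog. The table location string is an arbitrary example.

use std::str::FromStr;

use iceberg::{MetadataLocation, Result};

fn demo_metadata_paths() -> Result<()> {
    // A brand-new table starts at version 0, rendered with a zero-padded prefix,
    // e.g. "/warehouse/db/tbl/metadata/00000-<uuid>.metadata.json".
    let first = MetadataLocation::new_with_table_location("/warehouse/db/tbl");

    // Committing an update parses the current location and bumps the version
    // while generating a fresh UUID for the new metadata file.
    let next = MetadataLocation::from_str(&first.to_string())?.with_next_version();
    assert!(next.to_string().contains("/metadata/00001-"));
    Ok(())
}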