diff --git a/bindings/python/Cargo.lock b/bindings/python/Cargo.lock
index 6ad2d624cf..6c3f79a6cc 100644
--- a/bindings/python/Cargo.lock
+++ b/bindings/python/Cargo.lock
@@ -2221,6 +2221,7 @@ dependencies = [
  "ordered-float 4.6.0",
  "parquet",
  "rand 0.8.5",
+ "reqsign",
  "reqwest",
  "roaring",
  "rust_decimal",
diff --git a/bindings/python/src/datafusion_table_provider.rs b/bindings/python/src/datafusion_table_provider.rs
index b5e1bf952e..4f06a5799f 100644
--- a/bindings/python/src/datafusion_table_provider.rs
+++ b/bindings/python/src/datafusion_table_provider.rs
@@ -24,6 +24,7 @@ use iceberg::TableIdent;
 use iceberg::io::FileIO;
 use iceberg::table::StaticTable;
 use iceberg_datafusion::table::IcebergTableProvider;
+use iceberg_datafusion::table::static_catalog::StaticCatalog;
 use pyo3::exceptions::PyRuntimeError;
 use pyo3::prelude::*;
 use pyo3::types::PyCapsule;
@@ -61,7 +62,7 @@ impl PyIcebergDataFusionTable {
             .map_err(|e| PyRuntimeError::new_err(format!("Failed to build FileIO: {e}")))?;

         let static_table =
-            StaticTable::from_metadata_file(&metadata_location, table_ident, file_io)
+            StaticTable::from_metadata_file(&metadata_location, table_ident.clone(), file_io)
                 .await
                 .map_err(|e| {
                     PyRuntimeError::new_err(format!("Failed to load static table: {e}"))
@@ -69,7 +70,9 @@ impl PyIcebergDataFusionTable {

         let table = static_table.into_table();

-        IcebergTableProvider::try_new_from_table(table)
+        let static_catalog = Arc::new(StaticCatalog::new(table));
+
+        IcebergTableProvider::try_new(static_catalog, table_ident)
             .await
             .map_err(|e| {
                 PyRuntimeError::new_err(format!("Failed to create table provider: {e}"))
diff --git a/crates/iceberg/src/catalog/memory/catalog.rs b/crates/iceberg/src/catalog/memory/catalog.rs
index c233ab5925..48db9124a7 100644
--- a/crates/iceberg/src/catalog/memory/catalog.rs
+++ b/crates/iceberg/src/catalog/memory/catalog.rs
@@ -53,6 +53,19 @@ impl MemoryCatalog {
             warehouse_location,
         }
     }
+
+    /// Registers an existing table in the memory catalog.
+    pub async fn register_existing_table(
+        &self,
+        table_ident: &TableIdent,
+        metadata_location: String,
+    ) -> Result<()> {
+        let mut root_namespace_state = self.root_namespace_state.lock().await;
+
+        root_namespace_state.insert_new_table(table_ident, metadata_location.clone())?;
+
+        Ok(())
+    }
 }

 #[async_trait]
diff --git a/crates/integration_tests/tests/shared_tests/datafusion.rs b/crates/integration_tests/tests/shared_tests/datafusion.rs
index badb6496fc..efe9737d5e 100644
--- a/crates/integration_tests/tests/shared_tests/datafusion.rs
+++ b/crates/integration_tests/tests/shared_tests/datafusion.rs
@@ -24,7 +24,7 @@ use datafusion::assert_batches_eq;
 use datafusion::catalog::TableProvider;
 use datafusion::error::DataFusionError;
 use datafusion::prelude::SessionContext;
-use iceberg::{Catalog, TableIdent};
+use iceberg::TableIdent;
 use iceberg_catalog_rest::RestCatalog;
 use iceberg_datafusion::IcebergTableProvider;
 use parquet::arrow::PARQUET_FIELD_ID_META_KEY;
@@ -36,17 +36,15 @@ async fn test_basic_queries() -> Result<(), DataFusionError> {
     let fixture = get_shared_containers();
     let rest_catalog = RestCatalog::new(fixture.catalog_config.clone());

-    let table = rest_catalog
-        .load_table(&TableIdent::from_strs(["default", "types_test"]).unwrap())
-        .await
-        .unwrap();
-
     let ctx = SessionContext::new();

     let table_provider = Arc::new(
-        IcebergTableProvider::try_new_from_table(table)
-            .await
-            .unwrap(),
+        IcebergTableProvider::try_new(
+            Arc::new(rest_catalog),
+            TableIdent::from_strs(["default", "types_test"]).unwrap(),
+        )
+        .await
+        .unwrap(),
     );

     let schema = table_provider.schema();
diff --git a/crates/integrations/datafusion/src/schema.rs b/crates/integrations/datafusion/src/schema.rs
index 3920ee73ca..e78844c2ad 100644
--- a/crates/integrations/datafusion/src/schema.rs
+++ b/crates/integrations/datafusion/src/schema.rs
@@ -25,7 +25,7 @@ use datafusion::datasource::TableProvider;
 use datafusion::error::{DataFusionError, Result as DFResult};
 use futures::future::try_join_all;
 use iceberg::inspect::MetadataTableType;
-use iceberg::{Catalog, NamespaceIdent, Result};
+use iceberg::{Catalog, NamespaceIdent, Result, TableIdent};

 use crate::table::IcebergTableProvider;
@@ -65,7 +65,10 @@ impl IcebergSchemaProvider {
         let providers = try_join_all(
             table_names
                 .iter()
-                .map(|name| IcebergTableProvider::try_new(client.clone(), namespace.clone(), name))
+                .map(|name| {
+                    let table_ident = TableIdent::new(namespace.clone(), name.clone());
+                    IcebergTableProvider::try_new(client.clone(), table_ident)
+                })
                 .collect::<Vec<_>>(),
         )
         .await?;
@@ -113,7 +116,7 @@ impl SchemaProvider for IcebergSchemaProvider {
         let metadata_table_type =
             MetadataTableType::try_from(metadata_table_name).map_err(DataFusionError::Plan)?;
         if let Some(table) = self.tables.get(table_name) {
-            let metadata_table = table.metadata_table(metadata_table_type);
+            let metadata_table = table.metadata_table(metadata_table_type).await;
             return Ok(Some(Arc::new(metadata_table)));
         } else {
             return Ok(None);
diff --git a/crates/integrations/datafusion/src/table/mod.rs b/crates/integrations/datafusion/src/table/mod.rs
index 7f741a534a..f738cc3b19 100644
--- a/crates/integrations/datafusion/src/table/mod.rs
+++ b/crates/integrations/datafusion/src/table/mod.rs
@@ -16,6 +16,7 @@
 // under the License.

 pub mod metadata_table;
+pub mod static_catalog;
 pub mod table_provider_factory;

 use std::any::Any;
@@ -31,8 +32,9 @@ use datafusion::physical_plan::ExecutionPlan;
 use iceberg::arrow::schema_to_arrow_schema;
 use iceberg::inspect::MetadataTableType;
 use iceberg::table::Table;
-use iceberg::{Catalog, Error, ErrorKind, NamespaceIdent, Result, TableIdent};
+use iceberg::{Catalog, Error, ErrorKind, Result, TableIdent};
 use metadata_table::IcebergMetadataTableProvider;
+use tokio::sync::RwLock;

 use crate::physical_plan::scan::IcebergTableScan;

@@ -40,8 +42,12 @@ use crate::physical_plan::scan::IcebergTableScan;
 /// managing access to a [`Table`].
 #[derive(Debug, Clone)]
 pub struct IcebergTableProvider {
+    /// A reference to the catalog that this table belongs to.
+    catalog: Arc<dyn Catalog>,
     /// A table in the catalog.
-    table: Table,
+    table: Arc<RwLock<Table>>,
+    /// The identifier of the table in the catalog.
+    table_identifier: TableIdent,
     /// Table snapshot id that will be queried via this provider.
     snapshot_id: Option<i64>,
     /// A reference-counted arrow `Schema`.
@@ -49,47 +55,31 @@ pub struct IcebergTableProvider {
 }

 impl IcebergTableProvider {
-    pub(crate) fn new(table: Table, schema: ArrowSchemaRef) -> Self {
-        IcebergTableProvider {
-            table,
-            snapshot_id: None,
-            schema,
-        }
-    }
     /// Asynchronously tries to construct a new [`IcebergTableProvider`]
     /// using the given client and table name to fetch an actual [`Table`]
     /// in the provided namespace.
-    pub(crate) async fn try_new(
-        client: Arc<dyn Catalog>,
-        namespace: NamespaceIdent,
-        name: impl Into<String>,
-    ) -> Result<Self> {
-        let ident = TableIdent::new(namespace, name.into());
-        let table = client.load_table(&ident).await?;
+    pub async fn try_new(client: Arc<dyn Catalog>, table_identifier: TableIdent) -> Result<Self> {
+        let table = client.load_table(&table_identifier).await?;
         let schema = Arc::new(schema_to_arrow_schema(table.metadata().current_schema())?);
         Ok(IcebergTableProvider {
-            table,
-            snapshot_id: None,
-            schema,
-        })
-    }
-
-    /// Asynchronously tries to construct a new [`IcebergTableProvider`]
-    /// using the given table. Can be used to create a table provider from an existing table regardless of the catalog implementation.
-    pub async fn try_new_from_table(table: Table) -> Result<Self> {
-        let schema = Arc::new(schema_to_arrow_schema(table.metadata().current_schema())?);
-        Ok(IcebergTableProvider {
-            table,
+            table: Arc::new(RwLock::new(table)),
+            table_identifier,
             snapshot_id: None,
+            catalog: client,
             schema,
         })
     }

     /// Asynchronously tries to construct a new [`IcebergTableProvider`]
     /// using a specific snapshot of the given table. Can be used to create a table provider from an existing table regardless of the catalog implementation.
-    pub async fn try_new_from_table_snapshot(table: Table, snapshot_id: i64) -> Result<Self> {
+    pub async fn try_new_from_table_snapshot(
+        client: Arc<dyn Catalog>,
+        table_identifier: TableIdent,
+        snapshot_id: i64,
+    ) -> Result<Self> {
+        let table = client.load_table(&table_identifier).await?;
         let snapshot = table
             .metadata()
             .snapshot_by_id(snapshot_id)
@@ -105,15 +95,30 @@
         let schema = snapshot.schema(table.metadata())?;
         let schema = Arc::new(schema_to_arrow_schema(&schema)?);
         Ok(IcebergTableProvider {
-            table,
+            table: Arc::new(RwLock::new(table)),
+            table_identifier,
             snapshot_id: Some(snapshot_id),
+            catalog: client,
             schema,
         })
     }

-    pub(crate) fn metadata_table(&self, r#type: MetadataTableType) -> IcebergMetadataTableProvider {
+    /// Refreshes the cached table to the latest metadata from the catalog.
+    pub async fn refresh_table_metadata(&self) -> Result<Table> {
+        let updated_table = self.catalog.load_table(&self.table_identifier).await?;
+
+        let mut table_guard = self.table.write().await;
+        *table_guard = updated_table.clone();
+
+        Ok(updated_table)
+    }
+
+    pub(crate) async fn metadata_table(
+        &self,
+        r#type: MetadataTableType,
+    ) -> IcebergMetadataTableProvider {
         IcebergMetadataTableProvider {
-            table: self.table.clone(),
+            table: self.table.read().await.clone(),
             r#type,
         }
     }
@@ -140,8 +145,13 @@
         filters: &[Expr],
         _limit: Option<usize>,
     ) -> DFResult<Arc<dyn ExecutionPlan>> {
+        // Refresh from the catalog, falling back to the cached table if the refresh fails
+        let table = match self.refresh_table_metadata().await.ok() {
+            Some(table) => table,
+            None => self.table.read().await.clone(),
+        };
         Ok(Arc::new(IcebergTableScan::new(
-            self.table.clone(),
+            table,
             self.snapshot_id,
             self.schema.clone(),
             projection,
@@ -161,14 +171,105 @@ impl TableProvider for IcebergTableProvider {

 #[cfg(test)]
 mod tests {
+    use std::collections::HashMap;
+
     use datafusion::common::Column;
     use datafusion::prelude::SessionContext;
-    use iceberg::TableIdent;
     use iceberg::io::FileIO;
     use iceberg::table::{StaticTable, Table};
+    use iceberg::{Namespace, NamespaceIdent, TableCommit, TableCreation, TableIdent};

     use super::*;

+    #[derive(Debug)]
+    struct TestCatalog {
+        table: Table,
+    }
+
+    impl TestCatalog {
+        fn new(table: Table) -> Self {
+            Self { table }
+        }
+    }
+
+    #[async_trait]
+    impl Catalog for TestCatalog {
+        async fn load_table(&self, _table_identifier: &TableIdent) -> Result<Table> {
+            Ok(self.table.clone())
+        }
+
+        async fn list_namespaces(
+            &self,
+            _parent: Option<&NamespaceIdent>,
+        ) -> Result<Vec<NamespaceIdent>> {
+            unimplemented!()
+        }
+
+        async fn create_namespace(
+            &self,
+            _namespace: &NamespaceIdent,
+            _properties: HashMap<String, String>,
+        ) -> Result<Namespace> {
+            unimplemented!()
+        }
+
+        async fn get_namespace(&self, _namespace: &NamespaceIdent) -> Result<Namespace> {
+            unimplemented!()
+        }
+
+        async fn namespace_exists(&self, _namespace: &NamespaceIdent) -> Result<bool> {
+            unimplemented!()
+        }
+
+        async fn update_namespace(
+            &self,
+            _namespace: &NamespaceIdent,
+            _properties: HashMap<String, String>,
+        ) -> Result<()> {
+            unimplemented!()
+        }
+
+        async fn drop_namespace(&self, _namespace: &NamespaceIdent) -> Result<()> {
+            unimplemented!()
+        }
+
+        async fn list_tables(&self, _namespace: &NamespaceIdent) -> Result<Vec<TableIdent>> {
+            unimplemented!()
+        }
+
+        async fn create_table(
+            &self,
+            _namespace: &NamespaceIdent,
+            _creation: TableCreation,
+        ) -> Result<Table> {
+            unimplemented!()
+        }
+
+        async fn drop_table(&self, _table: &TableIdent) -> Result<()> {
+            unimplemented!()
+        }
+
+        async fn table_exists(&self, _table: &TableIdent) -> Result<bool> {
+            unimplemented!()
+        }
+
+        async fn rename_table(&self, _src: &TableIdent, _dest: &TableIdent) -> Result<()> {
+            unimplemented!()
+        }
+
+        async fn update_table(&self, _commit: TableCommit) -> Result<Table> {
+            unimplemented!()
+        }
+
+        async fn register_table(
+            &self,
+            _table: &TableIdent,
+            _metadata_location: String,
+        ) -> Result<Table> {
+            unimplemented!()
+        }
+    }
+
     async fn get_test_table_from_metadata_file() -> Table {
         let metadata_file_name = "TableMetadataV2Valid.json";
         let metadata_file_path = format!(
@@ -191,7 +292,8 @@ mod tests {
     #[tokio::test]
     async fn test_try_new_from_table() {
         let table = get_test_table_from_metadata_file().await;
-        let table_provider = IcebergTableProvider::try_new_from_table(table.clone())
+        let catalog = Arc::new(TestCatalog::new(table.clone()));
+        let table_provider = IcebergTableProvider::try_new(catalog, table.identifier().clone())
             .await
             .unwrap();
         let ctx = SessionContext::new();
@@ -216,10 +318,14 @@ mod tests {
     async fn test_try_new_from_table_snapshot() {
         let table = get_test_table_from_metadata_file().await;
         let snapshot_id = table.metadata().snapshots().next().unwrap().snapshot_id();
-        let table_provider =
-            IcebergTableProvider::try_new_from_table_snapshot(table.clone(), snapshot_id)
-                .await
-                .unwrap();
+        let catalog = Arc::new(TestCatalog::new(table.clone()));
+        let table_provider = IcebergTableProvider::try_new_from_table_snapshot(
+            catalog,
+            table.identifier().clone(),
+            snapshot_id,
+        )
+        .await
+        .unwrap();
         let ctx = SessionContext::new();
         ctx.register_table("mytable", Arc::new(table_provider))
             .unwrap();
@@ -241,7 +347,8 @@ mod tests {
     #[tokio::test]
     async fn test_physical_input_schema_consistent_with_logical_input_schema() {
         let table = get_test_table_from_metadata_file().await;
-        let table_provider = IcebergTableProvider::try_new_from_table(table.clone())
+        let catalog = Arc::new(TestCatalog::new(table.clone()));
+        let table_provider = IcebergTableProvider::try_new(catalog, table.identifier().clone())
             .await
             .unwrap();
         let ctx = SessionContext::new();
diff --git a/crates/integrations/datafusion/src/table/static_catalog.rs b/crates/integrations/datafusion/src/table/static_catalog.rs
new file mode 100644
index 0000000000..03fb5abd96
--- /dev/null
+++ b/crates/integrations/datafusion/src/table/static_catalog.rs
@@ -0,0 +1,170 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! A static catalog provider that implements the [`Catalog`] trait for
+//! use in constructing an [`IcebergTableProvider`] from a static table.
+
+use std::collections::HashMap;
+
+use async_trait::async_trait;
+use iceberg::table::Table;
+use iceberg::{
+    Catalog, Error, ErrorKind, Namespace, NamespaceIdent, Result, TableCommit, TableCreation,
+    TableIdent,
+};
+
+/// Represents a static catalog that contains a single table.
+#[derive(Debug)]
+pub struct StaticCatalog {
+    table: Table,
+}
+
+impl StaticCatalog {
+    pub fn new(table: Table) -> Self {
+        Self { table }
+    }
+}
+
+#[async_trait]
+impl Catalog for StaticCatalog {
+    async fn load_table(&self, table_identifier: &TableIdent) -> Result<Table> {
+        if self.table.identifier() != table_identifier {
+            return Err(Error::new(
+                ErrorKind::TableNotFound,
+                format!(
+                    "Table with identifier {} not found in static catalog",
+                    table_identifier
+                ),
+            ));
+        }
+
+        Ok(self.table.clone())
+    }
+
+    async fn list_namespaces(
+        &self,
+        _parent: Option<&NamespaceIdent>,
+    ) -> Result<Vec<NamespaceIdent>> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Listing namespaces is not supported in static catalog",
+        ))
+    }
+
+    async fn create_namespace(
+        &self,
+        _namespace: &NamespaceIdent,
+        _properties: HashMap<String, String>,
+    ) -> Result<Namespace> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Creating namespaces is not supported in static catalog",
+        ))
+    }
+
+    async fn get_namespace(&self, _namespace: &NamespaceIdent) -> Result<Namespace> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Getting namespaces is not supported in static catalog",
+        ))
+    }
+
+    async fn namespace_exists(&self, _namespace: &NamespaceIdent) -> Result<bool> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Checking namespace existence is not supported in static catalog",
+        ))
+    }
+
+    async fn update_namespace(
+        &self,
+        _namespace: &NamespaceIdent,
+        _properties: HashMap<String, String>,
+    ) -> Result<()> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Updating namespaces is not supported in static catalog",
+        ))
+    }
+
+    async fn drop_namespace(&self, _namespace: &NamespaceIdent) -> Result<()> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Dropping namespaces is not supported in static catalog",
+        ))
+    }
+
+    async fn list_tables(&self, namespace: &NamespaceIdent) -> Result<Vec<TableIdent>> {
+        if self.table.identifier().namespace() == namespace {
+            return Ok(vec![self.table.identifier().clone()]);
+        }
+        Err(Error::new(
+            ErrorKind::NamespaceNotFound,
+            format!("Namespace {} not found in static catalog", namespace),
+        ))
+    }
+
+    async fn create_table(
+        &self,
+        _namespace: &NamespaceIdent,
+        _creation: TableCreation,
+    ) -> Result<Table> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Creating tables is not supported in static catalog",
+        ))
+    }
+
+    async fn drop_table(&self, _table: &TableIdent) -> Result<()> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Dropping tables is not supported in static catalog",
+        ))
+    }
+
+    async fn table_exists(&self, table: &TableIdent) -> Result<bool> {
+        if self.table.identifier() == table {
+            return Ok(true);
+        }
+        Ok(false)
+    }
+
+    async fn rename_table(&self, _src: &TableIdent, _dest: &TableIdent) -> Result<()> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Renaming tables is not supported in static catalog",
+        ))
+    }
+
+    async fn update_table(&self, _commit: TableCommit) -> Result<Table> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Updating tables is not supported in static catalog",
+        ))
+    }
+
+    async fn register_table(
+        &self,
+        _table: &TableIdent,
+        _metadata_location: String,
+    ) -> Result<Table> {
+        Err(Error::new(
+            ErrorKind::FeatureUnsupported,
+            "Registering tables is not supported in static catalog",
+        ))
+    }
+}
diff --git a/crates/integrations/datafusion/src/table/table_provider_factory.rs b/crates/integrations/datafusion/src/table/table_provider_factory.rs
index a6d3146e54..a3a11f1c53 100644
--- a/crates/integrations/datafusion/src/table/table_provider_factory.rs
+++ b/crates/integrations/datafusion/src/table/table_provider_factory.rs
@@ -24,12 +24,12 @@ use datafusion::catalog::{Session, TableProvider, TableProviderFactory};
 use datafusion::error::Result as DFResult;
 use datafusion::logical_expr::CreateExternalTable;
 use datafusion::sql::TableReference;
-use iceberg::arrow::schema_to_arrow_schema;
 use iceberg::io::FileIO;
 use iceberg::table::StaticTable;
 use iceberg::{Error, ErrorKind, Result, TableIdent};

 use super::IcebergTableProvider;
+use crate::table::static_catalog::StaticCatalog;
 use crate::to_datafusion_error;

 /// A factory that implements DataFusion's `TableProviderFactory` to create `IcebergTableProvider` instances.
@@ -126,10 +126,14 @@ impl TableProviderFactory for IcebergTableProviderFactory {
             .map_err(to_datafusion_error)?
             .into_table();

-        let schema = schema_to_arrow_schema(table.metadata().current_schema())
-            .map_err(to_datafusion_error)?;
+        let table_ident = table.identifier().clone();
+        let static_catalog = Arc::new(StaticCatalog::new(table));

-        Ok(Arc::new(IcebergTableProvider::new(table, Arc::new(schema))))
+        Ok(Arc::new(
+            IcebergTableProvider::try_new(static_catalog, table_ident)
+                .await
+                .map_err(to_datafusion_error)?,
+        ))
     }
 }

diff --git a/crates/integrations/datafusion/tests/integration_datafusion_test.rs b/crates/integrations/datafusion/tests/integration_datafusion_test.rs
index 1491e4dbff..be0977e1ab 100644
--- a/crates/integrations/datafusion/tests/integration_datafusion_test.rs
+++ b/crates/integrations/datafusion/tests/integration_datafusion_test.rs
@@ -29,8 +29,8 @@ use expect_test::expect;
 use iceberg::io::FileIOBuilder;
 use iceberg::spec::{NestedField, PrimitiveType, Schema, StructType, Type};
 use iceberg::test_utils::check_record_batches;
-use iceberg::{Catalog, MemoryCatalog, NamespaceIdent, Result, TableCreation};
-use iceberg_datafusion::IcebergCatalogProvider;
+use iceberg::{Catalog, MemoryCatalog, NamespaceIdent, Result, TableCreation, TableIdent};
+use iceberg_datafusion::{IcebergCatalogProvider, IcebergTableProvider};
 use tempfile::TempDir;

 fn temp_path() -> String {
@@ -307,6 +307,41 @@ async fn test_table_predict_pushdown() -> Result<()> {
     Ok(())
 }

+#[tokio::test]
+async fn test_table_scan_snapshot() -> Result<()> {
+    let iceberg_catalog = get_iceberg_catalog();
+    let namespace = NamespaceIdent::new("test".to_string());
+    set_test_namespace(&iceberg_catalog, &namespace).await?;
+
+    let current_dir = std::env::current_dir().unwrap();
+    let metadata_path = current_dir.join("tests/test_data/scan_snapshot_update/test.db/test_table/metadata/00000-754ae971-c49f-4e40-9236-a50fd0884b5d.metadata.json");
+
+    let table_ident = TableIdent::new(namespace, "test_table".to_string());
+    iceberg_catalog
+        .register_existing_table(&table_ident, metadata_path.display().to_string())
+        .await?;
+
+    let client = Arc::new(iceberg_catalog);
+    let table = Arc::new(
+        IcebergTableProvider::try_new(Arc::clone(&client) as Arc<dyn Catalog>, table_ident.clone())
+            .await?,
+    );
+
+    let ctx = SessionContext::new();
+    ctx.register_table("df_test", table)
+        .expect("failed to register table");
+    let records = ctx
+        .sql("select * from df_test")
+        .await
+        .unwrap()
+        .collect()
+        .await
+        .unwrap();
+    assert_eq!(0, records.len());
+
+    Ok(())
+}
+
 #[tokio::test]
 async fn test_metadata_table() -> Result<()> {
     let iceberg_catalog = get_iceberg_catalog();
@@ -335,6 +370,7 @@ async fn test_metadata_table() -> Result<()> {
         .collect()
         .await
         .unwrap();
+
     check_record_batches(
         snapshots,
         expect![[r#"
diff --git a/crates/integrations/datafusion/tests/test_data/scan_snapshot_update/test.db/test_table/metadata/00000-754ae971-c49f-4e40-9236-a50fd0884b5d.metadata.json b/crates/integrations/datafusion/tests/test_data/scan_snapshot_update/test.db/test_table/metadata/00000-754ae971-c49f-4e40-9236-a50fd0884b5d.metadata.json
new file mode 100644
index 0000000000..5e66dfd655
--- /dev/null
+++ b/crates/integrations/datafusion/tests/test_data/scan_snapshot_update/test.db/test_table/metadata/00000-754ae971-c49f-4e40-9236-a50fd0884b5d.metadata.json
@@ -0,0 +1 @@
+{"location":"./tests/test_data/scan_snapshot_update/test.db/test_table","table-uuid":"e94dbe67-55ae-40e6-b0ff-8b48ebabf550","last-updated-ms":1746586823335,"last-column-id":2,"schemas":[{"type":"struct","fields":[{"id":1,"name":"id","type":"string","required":true},{"id":2,"name":"comment","type":"string","required":true}],"schema-id":0,"identifier-field-ids":[]}],"current-schema-id":0,"partition-specs":[{"spec-id":0,"fields":[{"source-id":1,"field-id":1000,"transform":"identity","name":"id"}]}],"default-spec-id":0,"last-partition-id":1000,"properties":{"write.format.default":"parquet","write.parquet.compression-codec":"snappy"},"snapshots":[],"snapshot-log":[],"metadata-log":[],"sort-orders":[{"order-id":0,"fields":[]}],"default-sort-order-id":0,"refs":{},"statistics":[],"format-version":2,"last-sequence-number":0}
\ No newline at end of file
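
With this change every `IcebergTableProvider` is constructed through a `Catalog` implementation, and `StaticCatalog` is the adapter used when only a metadata file is available. A minimal usage sketch of the reworked API follows; the table identifier and metadata path are illustrative placeholders, not values from this patch:

```rust
use std::sync::Arc;

use iceberg::TableIdent;
use iceberg::io::FileIO;
use iceberg::table::StaticTable;
use iceberg_datafusion::IcebergTableProvider;
use iceberg_datafusion::table::static_catalog::StaticCatalog;

async fn provider_from_metadata_file() -> iceberg::Result<IcebergTableProvider> {
    // Hypothetical identifier and metadata location, for illustration only.
    let table_ident = TableIdent::from_strs(["default", "my_table"])?;
    let metadata_location = "/tmp/my_table/metadata/v1.metadata.json";

    // Load the table directly from its metadata file, bypassing any catalog service.
    let file_io = FileIO::from_path(metadata_location)?.build()?;
    let static_table =
        StaticTable::from_metadata_file(metadata_location, table_ident.clone(), file_io).await?;

    // Wrap the static table in a StaticCatalog so the provider can keep
    // (re)loading it through the common Catalog trait; with a live catalog
    // (e.g. a RestCatalog) the Arc would be passed directly instead.
    let static_catalog = Arc::new(StaticCatalog::new(static_table.into_table()));
    IcebergTableProvider::try_new(static_catalog, table_ident).await
}
```

Because `scan` now calls `refresh_table_metadata` first and falls back to the cached table when the refresh fails, providers backed by a live catalog observe newly committed snapshots without being re-registered, while `StaticCatalog`-backed providers keep serving the fixed table they were built from.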