diff --git a/Cargo.toml b/Cargo.toml
index 76a60e180..93c4f5e49 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -35,7 +35,7 @@
 utoipa-swagger-ui = { version = "9", features = ["axum"] }
 lazy_static = { version = "1.5" }
 snafu = { version = "0.8.5", features = ["futures"] }
 tracing = { version = "0.1", features = ["attributes"] }
-icebucket_history = { version = "0.1.0", path = "crates/history" }
+embucket_history = { version = "0.1.0", path = "crates/history" }
 datafusion = { version = "45.0.0" }
 datafusion-common = { version = "45.0.0" }
diff --git a/bin/bucketd/Cargo.toml b/bin/bucketd/Cargo.toml
index 10e4b1abc..d15f0b0b5 100644
--- a/bin/bucketd/Cargo.toml
+++ b/bin/bucketd/Cargo.toml
@@ -7,7 +7,7 @@ license-file = { workspace = true }
 [dependencies]
 clap = { version = "4.5.27", features = ["env", "derive"] }
 dotenv = { version = "0.15.0" }
-icebucket_runtime = { path = "../../crates/runtime" }
+embucket_runtime = { path = "../../crates/runtime" }
 object_store = { workspace = true }
 snmalloc-rs = { workspace = true }
 tokio = { workspace = true }
diff --git a/bin/bucketd/src/cli.rs b/bin/bucketd/src/cli.rs
index 0fa8ae080..440b9f57e 100644
--- a/bin/bucketd/src/cli.rs
+++ b/bin/bucketd/src/cli.rs
@@ -26,7 +26,7 @@ use std::sync::Arc;

 #[derive(Parser)]
 #[command(version, about, long_about=None)]
-pub struct IceBucketOpts {
+pub struct CliOpts {
     #[arg(
         short,
         long,
@@ -152,7 +152,7 @@ enum StoreBackend {
     Memory,
 }

-impl IceBucketOpts {
+impl CliOpts {
     #[allow(clippy::unwrap_used, clippy::as_conversions)]
     pub fn object_store_backend(self) -> ObjectStoreResult> {
         match self.backend {
diff --git a/bin/bucketd/src/main.rs b/bin/bucketd/src/main.rs
index 08052d1c2..807adfa43 100644
--- a/bin/bucketd/src/main.rs
+++ b/bin/bucketd/src/main.rs
@@ -19,10 +19,10 @@
 pub(crate) mod cli;

 use clap::Parser;
 use dotenv::dotenv;
-use icebucket_runtime::{
-    config::{IceBucketDbConfig, IceBucketRuntimeConfig},
-    http::config::IceBucketWebConfig,
-    run_icebucket,
+use embucket_runtime::{
+    config::{DbConfig, RuntimeConfig},
+    http::config::WebConfig,
+    run_binary,
 };
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
@@ -36,14 +36,13 @@
 async fn main() {
     tracing_subscriber::registry()
         .with(
-            tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| {
-                "bucketd=debug,icebucket_runtime=debug,tower_http=debug".into()
-            }),
+            tracing_subscriber::EnvFilter::try_from_default_env()
+                .unwrap_or_else(|_| "bucketd=debug,embucket_runtime=debug,tower_http=debug".into()),
         )
         .with(tracing_subscriber::fmt::layer())
         .init();

-    let opts = cli::IceBucketOpts::parse();
+    let opts = cli::CliOpts::parse();
     let slatedb_prefix = opts.slatedb_prefix.clone();
     let host = opts.host.clone().unwrap();
     let iceberg_catalog_url = opts.catalog_url.clone().unwrap();
@@ -65,13 +64,13 @@
             return;
         }
         Ok(object_store) => {
-            tracing::info!("Starting 🧊🪣 IceBucket...");
+            tracing::info!("Starting embucket");

-            let runtime_config = IceBucketRuntimeConfig {
-                db: IceBucketDbConfig {
+            let runtime_config = RuntimeConfig {
+                db: DbConfig {
                     slatedb_prefix: slatedb_prefix.clone(),
                 },
-                web: IceBucketWebConfig {
+                web: WebConfig {
                     host,
                     port,
                     allow_origin,
@@ -80,8 +79,8 @@
                 },
             };

-            if let Err(e) = run_icebucket(object_store, runtime_config).await {
-                tracing::error!("Error while running IceBucket: {:?}", e);
+            if let Err(e) = run_binary(object_store, runtime_config).await {
+                tracing::error!("Error while running: {:?}", e);
             }
         }
     }
diff --git a/crates/history/Cargo.toml b/crates/history/Cargo.toml
index 66d4105da..616f18489 100644
--- a/crates/history/Cargo.toml
+++ b/crates/history/Cargo.toml
@@ -1,11 +1,11 @@
 [package]
-name = "icebucket_history"
+name = "embucket_history"
 version = "0.1.0"
 edition = "2021"
 license-file = { workspace = true }

 [dependencies]
-icebucket_utils = { path = "../utils" }
+embucket_utils = { path = "../utils" }
 bytes = { workspace = true }
 chrono = { workspace = true, features = ["serde"] }
 serde = { workspace = true }
diff --git a/crates/history/src/entities/query.rs b/crates/history/src/entities/query.rs
index f61aaf86a..7f087c689 100644
--- a/crates/history/src/entities/query.rs
+++ b/crates/history/src/entities/query.rs
@@ -18,7 +18,7 @@
 use crate::WorksheetId;
 use bytes::Bytes;
 use chrono::{DateTime, Utc};
-use icebucket_utils::iterable::IterableEntity;
+use embucket_utils::iterable::IterableEntity;
 use serde::{Deserialize, Serialize};
 use utoipa::ToSchema;
diff --git a/crates/history/src/entities/worksheet.rs b/crates/history/src/entities/worksheet.rs
index 44cd16e64..cc6b0755b 100644
--- a/crates/history/src/entities/worksheet.rs
+++ b/crates/history/src/entities/worksheet.rs
@@ -17,7 +17,7 @@
 use bytes::Bytes;
 use chrono::{DateTime, Utc};
-use icebucket_utils::iterable::IterableEntity;
+use embucket_utils::iterable::IterableEntity;
 use serde::{Deserialize, Serialize};
 use utoipa::ToSchema;
diff --git a/crates/history/src/store.rs b/crates/history/src/store.rs
index fc7b126c8..cb5420866 100644
--- a/crates/history/src/store.rs
+++ b/crates/history/src/store.rs
@@ -17,8 +17,8 @@
 use crate::{QueryRecord, QueryRecordId, Worksheet, WorksheetId};
 use async_trait::async_trait;
-use icebucket_utils::iterable::{IterableCursor, IterableEntity};
-use icebucket_utils::Db;
+use embucket_utils::iterable::{IterableCursor, IterableEntity};
+use embucket_utils::Db;
 use snafu::{ResultExt, Snafu};
 use std::sync::Arc;
@@ -28,25 +28,25 @@ pub enum WorksheetsStoreError {
     BadKey { source: std::str::Utf8Error },

     #[snafu(display("Error adding worksheet: {source}"))]
-    WorksheetAdd { source: icebucket_utils::Error },
+    WorksheetAdd { source: embucket_utils::Error },

     #[snafu(display("Error getting worksheet: {source}"))]
-    WorksheetGet { source: icebucket_utils::Error },
+    WorksheetGet { source: embucket_utils::Error },

     #[snafu(display("Error getting worksheets: {source}"))]
-    WorksheetsList { source: icebucket_utils::Error },
+    WorksheetsList { source: embucket_utils::Error },

     #[snafu(display("Error deleting worksheet: {source}"))]
-    WorksheetDelete { source: icebucket_utils::Error },
+    WorksheetDelete { source: embucket_utils::Error },

     #[snafu(display("Error updating worksheet: {source}"))]
-    WorksheetUpdate { source: icebucket_utils::Error },
+    WorksheetUpdate { source: embucket_utils::Error },

     #[snafu(display("Error adding query record: {source}"))]
-    QueryAdd { source: icebucket_utils::Error },
+    QueryAdd { source: embucket_utils::Error },

     #[snafu(display("Error getting query history: {source}"))]
-    QueryGet { source: icebucket_utils::Error },
+    QueryGet { source: embucket_utils::Error },

     #[snafu(display("Can't locate worksheet by key: {message}"))]
     WorksheetNotFound { message: String },
@@ -190,7 +190,7 @@ impl WorksheetsStore for SlateDBWorksheetsStore {
 mod tests {
     use super::*;
     use chrono::{Duration, TimeZone, Utc};
-    use icebucket_utils::iterable::{IterableCursor, IterableEntity};
+    use embucket_utils::iterable::{IterableCursor, IterableEntity},
     use tokio;

     #[tokio::test]
diff --git a/crates/metastore/Cargo.toml
b/crates/metastore/Cargo.toml index 1d32108df..6fa204ac9 100644 --- a/crates/metastore/Cargo.toml +++ b/crates/metastore/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "icebucket_metastore" +name = "embucket_metastore" version = "0.1.0" edition = "2021" license-file = { workspace = true } @@ -12,7 +12,7 @@ dashmap = "6.1.0" futures = { workspace = true } iceberg-rust = { workspace = true } iceberg-rust-spec = { workspace = true } -icebucket_utils = { path = "../utils" } +embucket_utils = { path = "../utils" } object_store = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/crates/metastore/src/error.rs b/crates/metastore/src/error.rs index bfdb0d6bf..750a9343b 100644 --- a/crates/metastore/src/error.rs +++ b/crates/metastore/src/error.rs @@ -56,7 +56,7 @@ pub enum MetastoreError { }, #[snafu(display("SlateDB error: {source}"))] - UtilSlateDB { source: icebucket_utils::Error }, + UtilSlateDB { source: embucket_utils::Error }, #[snafu(display("Metastore object of type {type_name} with name {name} already exists"))] ObjectAlreadyExists { type_name: String, name: String }, diff --git a/crates/metastore/src/metastore.rs b/crates/metastore/src/metastore.rs index 2a0254067..6c61c6897 100644 --- a/crates/metastore/src/metastore.rs +++ b/crates/metastore/src/metastore.rs @@ -24,11 +24,11 @@ use async_trait::async_trait; use bytes::Bytes; use chrono::Utc; use dashmap::DashMap; +use embucket_utils::list_config::ListConfig; +use embucket_utils::Db; use futures::{StreamExt, TryStreamExt}; use iceberg_rust::catalog::commit::apply_table_updates; use iceberg_rust_spec::table_metadata::{FormatVersion, TableMetadataBuilder}; -use icebucket_utils::list_config::ListConfig; -use icebucket_utils::Db; use object_store::{path::Path, ObjectStore, PutPayload}; use serde::de::DeserializeOwned; use snafu::ResultExt; @@ -36,126 +36,102 @@ use uuid::Uuid; #[async_trait] pub trait Metastore: std::fmt::Debug + Send + Sync { - async fn list_volumes( - &self, - list_config: ListConfig, - ) -> MetastoreResult>>; + async fn list_volumes(&self, list_config: ListConfig) + -> MetastoreResult>>; async fn create_volume( &self, - name: &IceBucketVolumeIdent, - volume: IceBucketVolume, - ) -> MetastoreResult>; - async fn get_volume( - &self, - name: &IceBucketVolumeIdent, - ) -> MetastoreResult>>; + name: &VolumeIdent, + volume: Volume, + ) -> MetastoreResult>; + async fn get_volume(&self, name: &VolumeIdent) -> MetastoreResult>>; async fn update_volume( &self, - name: &IceBucketVolumeIdent, - volume: IceBucketVolume, - ) -> MetastoreResult>; - async fn delete_volume( - &self, - name: &IceBucketVolumeIdent, - cascade: bool, - ) -> MetastoreResult<()>; + name: &VolumeIdent, + volume: Volume, + ) -> MetastoreResult>; + async fn delete_volume(&self, name: &VolumeIdent, cascade: bool) -> MetastoreResult<()>; async fn volume_object_store( &self, - name: &IceBucketVolumeIdent, + name: &VolumeIdent, ) -> MetastoreResult>>; async fn list_databases( &self, list_config: ListConfig, - ) -> MetastoreResult>>; + ) -> MetastoreResult>>; async fn create_database( &self, - name: &IceBucketDatabaseIdent, - database: IceBucketDatabase, - ) -> MetastoreResult>; + name: &DatabaseIdent, + database: Database, + ) -> MetastoreResult>; async fn get_database( &self, - name: &IceBucketDatabaseIdent, - ) -> MetastoreResult>>; + name: &DatabaseIdent, + ) -> MetastoreResult>>; async fn update_database( &self, - name: &IceBucketDatabaseIdent, - database: IceBucketDatabase, - ) -> MetastoreResult>; - async fn 
delete_database( - &self, - name: &IceBucketDatabaseIdent, - cascade: bool, - ) -> MetastoreResult<()>; + name: &DatabaseIdent, + database: Database, + ) -> MetastoreResult>; + async fn delete_database(&self, name: &DatabaseIdent, cascade: bool) -> MetastoreResult<()>; async fn list_schemas( &self, - database: &IceBucketDatabaseIdent, + database: &DatabaseIdent, list_config: ListConfig, - ) -> MetastoreResult>>; + ) -> MetastoreResult>>; async fn create_schema( &self, - ident: &IceBucketSchemaIdent, - schema: IceBucketSchema, - ) -> MetastoreResult>; - async fn get_schema( - &self, - ident: &IceBucketSchemaIdent, - ) -> MetastoreResult>>; + ident: &SchemaIdent, + schema: Schema, + ) -> MetastoreResult>; + async fn get_schema(&self, ident: &SchemaIdent) -> MetastoreResult>>; async fn update_schema( &self, - ident: &IceBucketSchemaIdent, - schema: IceBucketSchema, - ) -> MetastoreResult>; - async fn delete_schema( - &self, - ident: &IceBucketSchemaIdent, - cascade: bool, - ) -> MetastoreResult<()>; + ident: &SchemaIdent, + schema: Schema, + ) -> MetastoreResult>; + async fn delete_schema(&self, ident: &SchemaIdent, cascade: bool) -> MetastoreResult<()>; async fn list_tables( &self, - schema: &IceBucketSchemaIdent, + schema: &SchemaIdent, list_config: ListConfig, - ) -> MetastoreResult>>; + ) -> MetastoreResult>>; async fn create_table( &self, - ident: &IceBucketTableIdent, - table: IceBucketTableCreateRequest, - ) -> MetastoreResult>; - async fn get_table( - &self, - ident: &IceBucketTableIdent, - ) -> MetastoreResult>>; + ident: &TableIdent, + table: TableCreateRequest, + ) -> MetastoreResult>; + async fn get_table(&self, ident: &TableIdent) -> MetastoreResult>>; async fn update_table( &self, - ident: &IceBucketTableIdent, - update: IceBucketTableUpdate, - ) -> MetastoreResult>; - async fn delete_table(&self, ident: &IceBucketTableIdent, cascade: bool) - -> MetastoreResult<()>; + ident: &TableIdent, + update: TableUpdate, + ) -> MetastoreResult>; + async fn delete_table(&self, ident: &TableIdent, cascade: bool) -> MetastoreResult<()>; async fn table_object_store( &self, - ident: &IceBucketTableIdent, + ident: &TableIdent, ) -> MetastoreResult>>; - async fn table_exists(&self, ident: &IceBucketTableIdent) -> MetastoreResult; - async fn url_for_table(&self, ident: &IceBucketTableIdent) -> MetastoreResult; + async fn table_exists(&self, ident: &TableIdent) -> MetastoreResult; + async fn url_for_table(&self, ident: &TableIdent) -> MetastoreResult; async fn volume_for_table( &self, - ident: &IceBucketTableIdent, - ) -> MetastoreResult>>; + ident: &TableIdent, + ) -> MetastoreResult>>; } /// /// vol -> List of volumes -/// vol/ -> `IceBucketVolume` +/// vol/ -> `Volume` /// db -> List of databases -/// db/ -> `IceBucketDatabase` +/// db/ -> `Database` /// sch/ -> List of schemas for -/// sch// -> `IceBucketSchema` +/// sch// -> `Schema` /// tbl// -> List of tables for in -/// tbl/// -> `IceBucketTable` +/// tbl///
-> `Table` /// const KEY_VOLUME: &str = "vol"; const KEY_DATABASE: &str = "db"; @@ -164,7 +140,7 @@ const KEY_TABLE: &str = "tbl"; pub struct SlateDBMetastore { db: Db, - object_store_cache: DashMap>, + object_store_cache: DashMap>, } impl std::fmt::Debug for SlateDBMetastore { @@ -290,15 +266,15 @@ impl Metastore for SlateDBMetastore { async fn list_volumes( &self, list_config: ListConfig, - ) -> MetastoreResult>> { + ) -> MetastoreResult>> { self.list_objects(KEY_VOLUME, list_config).await } async fn create_volume( &self, - name: &IceBucketVolumeIdent, - volume: IceBucketVolume, - ) -> MetastoreResult> { + name: &VolumeIdent, + volume: Volume, + ) -> MetastoreResult> { let key = format!("{KEY_VOLUME}/{name}"); let object_store = volume.get_object_store()?; let rwobject = self @@ -320,10 +296,7 @@ impl Metastore for SlateDBMetastore { Ok(rwobject) } - async fn get_volume( - &self, - name: &IceBucketVolumeIdent, - ) -> MetastoreResult>> { + async fn get_volume(&self, name: &VolumeIdent) -> MetastoreResult>> { let key = format!("{KEY_VOLUME}/{name}"); self.db .get(&key) @@ -333,9 +306,9 @@ impl Metastore for SlateDBMetastore { async fn update_volume( &self, - name: &IceBucketVolumeIdent, - volume: IceBucketVolume, - ) -> MetastoreResult> { + name: &VolumeIdent, + volume: Volume, + ) -> MetastoreResult> { let key = format!("{KEY_VOLUME}/{name}"); let updated_volume = self.update_object(&key, volume.clone()).await?; let object_store = updated_volume.get_object_store()?; @@ -344,11 +317,7 @@ impl Metastore for SlateDBMetastore { Ok(updated_volume) } - async fn delete_volume( - &self, - name: &IceBucketVolumeIdent, - cascade: bool, - ) -> MetastoreResult<()> { + async fn delete_volume(&self, name: &VolumeIdent, cascade: bool) -> MetastoreResult<()> { let key = format!("{KEY_VOLUME}/{name}"); let databases_using = self .list_databases(ListConfig::default()) @@ -377,7 +346,7 @@ impl Metastore for SlateDBMetastore { async fn volume_object_store( &self, - name: &IceBucketVolumeIdent, + name: &VolumeIdent, ) -> MetastoreResult>> { if let Some(store) = self.object_store_cache.get(name) { Ok(Some(store.clone())) @@ -397,15 +366,15 @@ impl Metastore for SlateDBMetastore { async fn list_databases( &self, list_config: ListConfig, - ) -> MetastoreResult>> { + ) -> MetastoreResult>> { self.list_objects(KEY_DATABASE, list_config).await } async fn create_database( &self, - name: &IceBucketDatabaseIdent, - database: IceBucketDatabase, - ) -> MetastoreResult> { + name: &DatabaseIdent, + database: Database, + ) -> MetastoreResult> { self.get_volume(&database.volume).await?.ok_or( metastore_error::MetastoreError::VolumeNotFound { volume: database.volume.clone(), @@ -417,8 +386,8 @@ impl Metastore for SlateDBMetastore { async fn get_database( &self, - name: &IceBucketDatabaseIdent, - ) -> MetastoreResult>> { + name: &DatabaseIdent, + ) -> MetastoreResult>> { let key = format!("{KEY_DATABASE}/{name}"); self.db .get(&key) @@ -428,18 +397,14 @@ impl Metastore for SlateDBMetastore { async fn update_database( &self, - name: &IceBucketDatabaseIdent, - database: IceBucketDatabase, - ) -> MetastoreResult> { + name: &DatabaseIdent, + database: Database, + ) -> MetastoreResult> { let key = format!("{KEY_DATABASE}/{name}"); self.update_object(&key, database).await } - async fn delete_database( - &self, - name: &IceBucketDatabaseIdent, - cascade: bool, - ) -> MetastoreResult<()> { + async fn delete_database(&self, name: &DatabaseIdent, cascade: bool) -> MetastoreResult<()> { let schemas = self.list_schemas(name, 
ListConfig::default()).await?; if cascade { let futures = schemas @@ -454,18 +419,18 @@ impl Metastore for SlateDBMetastore { async fn list_schemas( &self, - database: &IceBucketDatabaseIdent, + database: &DatabaseIdent, list_config: ListConfig, - ) -> MetastoreResult>> { + ) -> MetastoreResult>> { let key = format!("{KEY_SCHEMA}/{database}"); self.list_objects(&key, list_config).await } async fn create_schema( &self, - ident: &IceBucketSchemaIdent, - schema: IceBucketSchema, - ) -> MetastoreResult> { + ident: &SchemaIdent, + schema: Schema, + ) -> MetastoreResult> { let key = format!("{KEY_SCHEMA}/{}/{}", ident.database, ident.schema); if self.get_database(&ident.database).await?.is_some() { self.create_object(&key, "schema", schema).await @@ -476,10 +441,7 @@ impl Metastore for SlateDBMetastore { } } - async fn get_schema( - &self, - ident: &IceBucketSchemaIdent, - ) -> MetastoreResult>> { + async fn get_schema(&self, ident: &SchemaIdent) -> MetastoreResult>> { let key = format!("{KEY_SCHEMA}/{}/{}", ident.database, ident.schema); self.db .get(&key) @@ -489,18 +451,14 @@ impl Metastore for SlateDBMetastore { async fn update_schema( &self, - ident: &IceBucketSchemaIdent, - schema: IceBucketSchema, - ) -> MetastoreResult> { + ident: &SchemaIdent, + schema: Schema, + ) -> MetastoreResult> { let key = format!("{KEY_SCHEMA}/{}/{}", ident.database, ident.schema); self.update_object(&key, schema).await } - async fn delete_schema( - &self, - ident: &IceBucketSchemaIdent, - cascade: bool, - ) -> MetastoreResult<()> { + async fn delete_schema(&self, ident: &SchemaIdent, cascade: bool) -> MetastoreResult<()> { let tables = self.list_tables(ident, ListConfig::default()).await?; if cascade { @@ -516,9 +474,9 @@ impl Metastore for SlateDBMetastore { async fn list_tables( &self, - schema: &IceBucketSchemaIdent, + schema: &SchemaIdent, list_config: ListConfig, - ) -> MetastoreResult>> { + ) -> MetastoreResult>> { let key = format!("{KEY_TABLE}/{}/{}", schema.database, schema.schema); self.list_objects(&key, list_config).await } @@ -526,9 +484,9 @@ impl Metastore for SlateDBMetastore { #[allow(clippy::too_many_lines)] async fn create_table( &self, - ident: &IceBucketTableIdent, - mut table: IceBucketTableCreateRequest, - ) -> MetastoreResult> { + ident: &TableIdent, + mut table: TableCreateRequest, + ) -> MetastoreResult> { if let Some(_schema) = self.get_schema(&ident.clone().into()).await? 
{ let key = format!( "{KEY_TABLE}/{}/{}/{}", @@ -542,9 +500,9 @@ impl Metastore for SlateDBMetastore { || Uuid::new_v4().to_string(), std::string::ToString::to_string, ); - let volume = IceBucketVolume { + let volume = Volume { ident: volume_ident.clone(), - volume: IceBucketVolumeType::Memory, + volume: VolumeType::Memory, }; let volume = self.create_volume(&volume_ident, volume).await?; if table.volume_ident.is_none() { @@ -601,7 +559,7 @@ impl Metastore for SlateDBMetastore { table_metadata.location(table_location.clone()); } - let table_format = table.format.unwrap_or(IceBucketTableFormat::Iceberg); + let table_format = table.format.unwrap_or(TableFormat::Iceberg); let table_metadata = table_metadata .build() @@ -610,7 +568,7 @@ impl Metastore for SlateDBMetastore { let mut table_properties = table.properties.unwrap_or_default().clone(); Self::update_properties_timestamps(&mut table_properties); - let table = IceBucketTable { + let table = Table { ident: ident.clone(), metadata: table_metadata.clone(), metadata_location: format!("{table_location}/{metadata_part}"), @@ -651,9 +609,9 @@ impl Metastore for SlateDBMetastore { async fn update_table( &self, - ident: &IceBucketTableIdent, - update: IceBucketTableUpdate, - ) -> MetastoreResult> { + ident: &TableIdent, + update: TableUpdate, + ) -> MetastoreResult> { let mut table = self .get_table(ident) .await? @@ -714,11 +672,7 @@ impl Metastore for SlateDBMetastore { Ok(rw_table) } - async fn delete_table( - &self, - ident: &IceBucketTableIdent, - cascade: bool, - ) -> MetastoreResult<()> { + async fn delete_table(&self, ident: &TableIdent, cascade: bool) -> MetastoreResult<()> { if let Some(table) = self.get_table(ident).await? { if cascade { let object_store = self.table_object_store(ident).await?.ok_or( @@ -766,10 +720,7 @@ impl Metastore for SlateDBMetastore { } } - async fn get_table( - &self, - ident: &IceBucketTableIdent, - ) -> MetastoreResult>> { + async fn get_table(&self, ident: &TableIdent) -> MetastoreResult>> { let key = format!( "{KEY_TABLE}/{}/{}/{}", ident.database, ident.schema, ident.table @@ -782,7 +733,7 @@ impl Metastore for SlateDBMetastore { async fn table_object_store( &self, - ident: &IceBucketTableIdent, + ident: &TableIdent, ) -> MetastoreResult>> { if let Some(volume) = self.volume_for_table(ident).await? { self.volume_object_store(&volume.ident).await @@ -791,11 +742,11 @@ impl Metastore for SlateDBMetastore { } } - async fn table_exists(&self, ident: &IceBucketTableIdent) -> MetastoreResult { + async fn table_exists(&self, ident: &TableIdent) -> MetastoreResult { self.get_table(ident).await.map(|table| table.is_some()) } - async fn url_for_table(&self, ident: &IceBucketTableIdent) -> MetastoreResult { + async fn url_for_table(&self, ident: &TableIdent) -> MetastoreResult { if let Some(tbl) = self.get_table(ident).await? { let database = self.get_database(&ident.database).await?.ok_or( metastore_error::MetastoreError::DatabaseNotFound { @@ -848,8 +799,8 @@ impl Metastore for SlateDBMetastore { async fn volume_for_table( &self, - ident: &IceBucketTableIdent, - ) -> MetastoreResult>> { + ident: &TableIdent, + ) -> MetastoreResult>> { let volume_ident = if let Some(Some(volume_ident)) = self .get_table(ident) .await? 
@@ -875,7 +826,7 @@ mod tests { use super::*; use futures::StreamExt; use iceberg_rust_spec::{ - schema::Schema, + schema::Schema as IcebergSchema, types::{PrimitiveType, StructField, StructType, Type}, }; use slatedb::db::Db as SlateDb; @@ -910,7 +861,7 @@ mod tests { async fn test_create_volumes() { let ms = get_metastore().await; - let volume = IceBucketVolume::new("test".to_owned(), IceBucketVolumeType::Memory); + let volume = Volume::new("test".to_owned(), VolumeType::Memory); ms.create_volume(&"test".to_string(), volume) .await .expect("create volume failed"); @@ -936,12 +887,12 @@ mod tests { async fn test_duplicate_volume() { let ms = get_metastore().await; - let volume = IceBucketVolume::new("test".to_owned(), IceBucketVolumeType::Memory); + let volume = Volume::new("test".to_owned(), VolumeType::Memory); ms.create_volume(&"test".to_owned(), volume) .await .expect("create volume failed"); - let volume2 = IceBucketVolume::new("test".to_owned(), IceBucketVolumeType::Memory); + let volume2 = Volume::new("test".to_owned(), VolumeType::Memory); let result = ms.create_volume(&"test".to_owned(), volume2).await; insta::with_settings!({ filters => insta_filters(), @@ -954,7 +905,7 @@ mod tests { async fn test_delete_volume() { let ms = get_metastore().await; - let volume = IceBucketVolume::new("test".to_owned(), IceBucketVolumeType::Memory); + let volume = Volume::new("test".to_owned(), VolumeType::Memory); ms.create_volume(&"test".to_string(), volume) .await .expect("create volume failed"); @@ -985,14 +936,14 @@ mod tests { async fn test_update_volume() { let ms = get_metastore().await; - let volume = IceBucketVolume::new("test".to_owned(), IceBucketVolumeType::Memory); + let volume = Volume::new("test".to_owned(), VolumeType::Memory); let rwo1 = ms .create_volume(&"test".to_owned(), volume) .await .expect("create volume failed"); - let volume = IceBucketVolume::new( + let volume = Volume::new( "test".to_owned(), - IceBucketVolumeType::File(IceBucketFileVolume { + VolumeType::File(FileVolume { path: "/tmp".to_owned(), }), ); @@ -1010,7 +961,7 @@ mod tests { #[tokio::test] async fn test_create_database() { let ms = get_metastore().await; - let mut database = IceBucketDatabase { + let mut database = Database { ident: "testdb".to_owned(), volume: "testv1".to_owned(), properties: None, @@ -1019,10 +970,10 @@ mod tests { .create_database(&"testdb".to_owned(), database.clone()) .await; - let volume = IceBucketVolume::new("test".to_owned(), IceBucketVolumeType::Memory); - let volume2 = IceBucketVolume::new( + let volume = Volume::new("test".to_owned(), VolumeType::Memory); + let volume2 = Volume::new( "test2".to_owned(), - IceBucketVolumeType::File(IceBucketFileVolume { + VolumeType::File(FileVolume { path: "/tmp".to_owned(), }), ); @@ -1067,8 +1018,8 @@ mod tests { #[tokio::test] async fn test_schemas() { let ms = get_metastore().await; - let schema = IceBucketSchema { - ident: IceBucketSchemaIdent { + let schema = Schema { + ident: SchemaIdent { database: "testdb".to_owned(), schema: "testschema".to_owned(), }, @@ -1079,13 +1030,13 @@ mod tests { .create_schema(&schema.ident.clone(), schema.clone()) .await; - let volume = IceBucketVolume::new("test".to_owned(), IceBucketVolumeType::Memory); + let volume = Volume::new("test".to_owned(), VolumeType::Memory); ms.create_volume(&"testv1".to_owned(), volume) .await .expect("create volume failed"); ms.create_database( &"testdb".to_owned(), - IceBucketDatabase { + Database { ident: "testdb".to_owned(), volume: "testv1".to_owned(), properties: None, @@ 
-1131,7 +1082,7 @@ mod tests { let db = Db::new(Arc::new(sdb)); let ms = SlateDBMetastore::new(db); - let schema = Schema::builder() + let schema = IcebergSchema::builder() .with_schema_id(0) .with_fields( StructType::builder() @@ -1151,8 +1102,8 @@ mod tests { .build() .expect("schema build failed"); - let table = IceBucketTableCreateRequest { - ident: IceBucketTableIdent { + let table = TableCreateRequest { + ident: TableIdent { database: "testdb".to_owned(), schema: "testschema".to_owned(), table: "testtable".to_owned(), @@ -1170,13 +1121,13 @@ mod tests { let no_schema_result = ms.create_table(&table.ident.clone(), table.clone()).await; - let volume = IceBucketVolume::new("testv1".to_owned(), IceBucketVolumeType::Memory); + let volume = Volume::new("testv1".to_owned(), VolumeType::Memory); ms.create_volume(&"testv1".to_owned(), volume) .await .expect("create volume failed"); ms.create_database( &"testdb".to_owned(), - IceBucketDatabase { + Database { ident: "testdb".to_owned(), volume: "testv1".to_owned(), properties: None, @@ -1185,12 +1136,12 @@ mod tests { .await .expect("create database failed"); ms.create_schema( - &IceBucketSchemaIdent { + &SchemaIdent { database: "testdb".to_owned(), schema: "testschema".to_owned(), }, - IceBucketSchema { - ident: IceBucketSchemaIdent { + Schema { + ident: SchemaIdent { database: "testdb".to_owned(), schema: "testschema".to_owned(), }, @@ -1254,7 +1205,7 @@ mod tests { let db = Db::new(Arc::new(sdb)); let ms = SlateDBMetastore::new(db); - let schema = Schema::builder() + let schema = IcebergSchema::builder() .with_schema_id(0) .with_fields( StructType::builder() @@ -1274,8 +1225,8 @@ mod tests { .build() .expect("schema build failed"); - let table = IceBucketTableCreateRequest { - ident: IceBucketTableIdent { + let table = TableCreateRequest { + ident: TableIdent { database: "testdb".to_owned(), schema: "testschema".to_owned(), table: "testtable".to_owned(), @@ -1291,13 +1242,13 @@ mod tests { is_temporary: Some(true), }; - let volume = IceBucketVolume::new("testv1".to_owned(), IceBucketVolumeType::Memory); + let volume = Volume::new("testv1".to_owned(), VolumeType::Memory); ms.create_volume(&"testv1".to_owned(), volume) .await .expect("create volume failed"); ms.create_database( &"testdb".to_owned(), - IceBucketDatabase { + Database { ident: "testdb".to_owned(), volume: "testv1".to_owned(), properties: None, @@ -1306,12 +1257,12 @@ mod tests { .await .expect("create database failed"); ms.create_schema( - &IceBucketSchemaIdent { + &SchemaIdent { database: "testdb".to_owned(), schema: "testschema".to_owned(), }, - IceBucketSchema { - ident: IceBucketSchemaIdent { + Schema { + ident: SchemaIdent { database: "testdb".to_owned(), schema: "testschema".to_owned(), }, diff --git a/crates/metastore/src/models/database.rs b/crates/metastore/src/models/database.rs index 20c8d42e5..552b23ae3 100644 --- a/crates/metastore/src/models/database.rs +++ b/crates/metastore/src/models/database.rs @@ -20,22 +20,22 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; use validator::Validate; -use super::IceBucketVolumeIdent; +use super::VolumeIdent; /// A database identifier -pub type IceBucketDatabaseIdent = String; +pub type DatabaseIdent = String; #[derive(Validate, Debug, Clone, Serialize, Deserialize, PartialEq, Eq, utoipa::ToSchema)] -pub struct IceBucketDatabase { +pub struct Database { #[validate(length(min = 1))] - pub ident: IceBucketDatabaseIdent, + pub ident: DatabaseIdent, #[serde(skip_serializing_if = "Option::is_none")] pub 
properties: Option>, /// Volume identifier - pub volume: IceBucketVolumeIdent, + pub volume: VolumeIdent, } -impl IceBucketDatabase { +impl Database { #[must_use] pub fn prefix(&self, parent: &str) -> String { format!("{}/{}", parent, self.ident) @@ -48,7 +48,7 @@ mod tests { #[test] fn test_prefix() { - let db = IceBucketDatabase { + let db = Database { ident: "db".to_string(), properties: None, volume: "vol".to_string(), diff --git a/crates/metastore/src/models/schema.rs b/crates/metastore/src/models/schema.rs index b4190b149..b69d841f1 100644 --- a/crates/metastore/src/models/schema.rs +++ b/crates/metastore/src/models/schema.rs @@ -20,46 +20,46 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; use validator::Validate; -use super::IceBucketDatabaseIdent; +use super::DatabaseIdent; #[derive(Validate, Debug, Clone, Serialize, Deserialize, PartialEq, Eq, utoipa::ToSchema)] /// A schema identifier -pub struct IceBucketSchemaIdent { +pub struct SchemaIdent { #[validate(length(min = 1))] /// The name of the schema pub schema: String, #[validate(length(min = 1))] /// The database the schema belongs to - pub database: IceBucketDatabaseIdent, + pub database: DatabaseIdent, } -impl IceBucketSchemaIdent { +impl SchemaIdent { #[must_use] - pub const fn new(database: IceBucketDatabaseIdent, schema: String) -> Self { + pub const fn new(database: DatabaseIdent, schema: String) -> Self { Self { schema, database } } } -impl std::fmt::Display for IceBucketSchemaIdent { +impl std::fmt::Display for SchemaIdent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}.{}", self.database, self.schema) } } #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, utoipa::ToSchema)] -pub struct IceBucketSchema { - pub ident: IceBucketSchemaIdent, +pub struct Schema { + pub ident: SchemaIdent, pub properties: Option>, } -impl IceBucketSchema { +impl Schema { #[must_use] pub fn prefix(&self, parent: &str) -> String { format!("{}/{}", parent, self.ident.schema) } } -impl std::fmt::Display for IceBucketSchema { +impl std::fmt::Display for Schema { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}.{}", self.ident.database, self.ident.schema) } @@ -71,8 +71,8 @@ mod tests { #[test] fn test_prefix() { - let schema = IceBucketSchema { - ident: IceBucketSchemaIdent { + let schema = Schema { + ident: SchemaIdent { schema: "schema".to_string(), database: "db".to_string(), }, diff --git a/crates/metastore/src/models/table.rs b/crates/metastore/src/models/table.rs index bee7c6cd7..1a0c19490 100644 --- a/crates/metastore/src/models/table.rs +++ b/crates/metastore/src/models/table.rs @@ -17,7 +17,7 @@ use crate::error::{MetastoreError, MetastoreResult}; use iceberg_rust::{ - catalog::commit::{TableRequirement, TableUpdate}, + catalog::commit::{TableRequirement, TableUpdate as IcebergTableUpdate}, spec::table_metadata::TableMetadata, }; use iceberg_rust_spec::{ @@ -28,11 +28,11 @@ use std::{collections::HashMap, fmt::Display}; use utoipa::ToSchema; use validator::Validate; -use super::{IceBucketSchemaIdent, IceBucketVolumeIdent}; +use super::{SchemaIdent, VolumeIdent}; #[derive(Validate, Debug, Clone, Serialize, Deserialize, PartialEq, Eq, utoipa::ToSchema)] /// A table identifier -pub struct IceBucketTableIdent { +pub struct TableIdent { #[validate(length(min = 1))] /// The name of the table pub table: String, @@ -44,7 +44,7 @@ pub struct IceBucketTableIdent { pub database: String, } -impl IceBucketTableIdent { +impl TableIdent { #[must_use] 
pub fn new(database: &str, schema: &str, table: &str) -> Self { Self { @@ -60,8 +60,8 @@ impl IceBucketTableIdent { } } -impl From for IceBucketSchemaIdent { - fn from(ident: IceBucketTableIdent) -> Self { +impl From for SchemaIdent { + fn from(ident: TableIdent) -> Self { Self { database: ident.database, schema: ident.schema, @@ -69,7 +69,7 @@ impl From for IceBucketSchemaIdent { } } -impl Display for IceBucketTableIdent { +impl Display for TableIdent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}.{}.{}", self.database, self.schema, self.table) } @@ -79,7 +79,7 @@ impl Display for IceBucketTableIdent { Debug, Serialize, Deserialize, Clone, PartialEq, Eq, utoipa::ToSchema, strum::EnumString, )] #[serde(rename_all = "kebab-case")] -pub enum IceBucketTableFormat { +pub enum TableFormat { /* Avro, Orc, @@ -90,7 +90,7 @@ pub enum IceBucketTableFormat { Iceberg, } -impl From for IceBucketTableFormat { +impl From for TableFormat { fn from(value: String) -> Self { match value.to_lowercase().as_str() { "parquet" => Self::Parquet, @@ -99,80 +99,24 @@ impl From for IceBucketTableFormat { } } -/*#[derive(Validate, Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct IceBucketSimpleSchema { - pub fields: Vec, - pub schema_id: Option, -} - -impl TryFrom for Schema { - type Error = MetastoreError; - fn try_from(schema: IceBucketSimpleSchema) -> MetastoreResult { - let mut builder = Self::builder(); - builder = builder.with_fields(schema.fields); - if let Some(schema_id) = schema.schema_id { - builder = builder.with_schema_id(schema_id); - } - builder.build() - .context(metastore_error::IcebergSnafu) - } -} - -type SimpleOrIcebergSchema = Either;*/ - #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct IceBucketTable { - pub ident: IceBucketTableIdent, +pub struct Table { + pub ident: TableIdent, pub metadata: TableMetadata, pub metadata_location: String, pub properties: HashMap, - pub volume_ident: Option, + pub volume_ident: Option, pub volume_location: Option, pub is_temporary: bool, - pub format: IceBucketTableFormat, -} - -/*impl PartialSchema for IceBucketTable { - fn schema() -> openapi::RefOr { - - let table_metadata_schema = openapi::ObjectBuilder::new() - .property("format_version", openapi::ObjectBuilder::new() - .schema_type(openapi::Type::Integer) - .format(Some(openapi::SchemaFormat::KnownFormat(openapi::KnownFormat::Int32))) - .build() - ) - .property( - "table_uuid", - openapi::Object::with_type(openapi::Type::String)) - .property("name", openapi::schema::String::default()) - .property("schema_id", openapi::schema::Integer::default()) - .property("current_schema_id", openapi::schema::Integer::default()) - .property("default_partition_spec_id", openapi::schema::Integer::default()) - .property("default_sort_order_id", openapi::schema::Integer::default()) - .property("last_partition_id", openapi::schema::Integer::default()) - .property("last_column_id", openapi::schema::Integer::default()) - .property("refs", openapi::schema::Object::default()) - .property("properties", utoipa_schema::Map::default()) - .property("schema", openapi::schema::Object::default()) - .property("partition_spec", openapi::schema::Object::default()) - .property("sort_order", openapi::schema::Object::default()) - .build(); - openapi::ObjectBuilder::default() - .property("ident", IceBucketTableIdent::schema()) - .property("metadata", table_metadata_schema) - .property("metadata_location", openapi::schema::String::default()) - .property("properties", 
utoipa_schema::Map::default()) - .build() - } + pub format: TableFormat, } -impl ToSchema for IceBucketTable {}*/ #[derive(Validate, Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct IceBucketTableCreateRequest { +pub struct TableCreateRequest { #[validate(nested)] - pub ident: IceBucketTableIdent, + pub ident: TableIdent, pub properties: Option>, - pub format: Option, + pub format: Option, pub location: Option, //pub schema: SimpleOrIcebergSchema, @@ -180,78 +124,10 @@ pub struct IceBucketTableCreateRequest { pub partition_spec: Option, pub sort_order: Option, pub stage_create: Option, - pub volume_ident: Option, + pub volume_ident: Option, pub is_temporary: Option, } -/*fn type_schema() -> (String, openapi::RefOr) { - let primitive_type = openapi::OneOfBuilder::new() - .item(openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::String)) - .enum_values(Some(vec!["boolean", "int", "long", "float", "double", "date", "time", "timestamp", "timestamptz", "string", "uuid", "binary"])) - ) - .item(openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Object)) - .property("precision", openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Integer)) - .build()) - .property("scale", openapi::schema::Type::Integer) - ) - .item(openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Integer))) - .build(); - let struct_type = openapi::RefOr::Ref(openapi::Ref::builder().ref_location_from_schema_name("StructType".to_string()).build()); - let list_type = openapi::ObjectBuilder::new() - .property("element_id", openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Integer)) - .build() - ) - .property("element_required", openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Boolean)) - .build() - ) - .property("element", openapi::RefOr::Ref(openapi::Ref::builder().ref_location_from_schema_name("Type".to_string()).build())) - .build(); - let map_type = openapi::ObjectBuilder::new() - .property("key_id", openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Integer)) - .build() - ) - .property("key", openapi::RefOr::Ref(openapi::Ref::builder().ref_location_from_schema_name("Type".to_string()).build())) - .property("value_id", openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Integer)) - .build() - ) - .property("value", openapi::RefOr::Ref(openapi::Ref::builder().ref_location_from_schema_name("Type".to_string()).build())) - .property("value_required", openapi::ObjectBuilder::new() - .schema_type(openapi::schema::SchemaType::new(openapi::schema::Type::Boolean)) - .build() - ) - .build(); - let one_of = openapi::OneOf::builder() - .item(primitive_type.into()) - .item(struct_type) - .item(list_type) - .item(map_type); - ("Type".to_string(), one_of.into()) -} - -impl ToSchema for IceBucketTableCreateRequest {} -impl PartialSchema for IceBucketTableCreateRequest { - fn schema() -> utoipa::openapi::RefOr { - - let - let mut type_schema = openapi::OneOfBuilder::new() - .item(primitive_type) - - - let mut struct_field_type = openapi::OneOfBuilder::new() - .item(primitive_type) - let struct_field = openapi::ObjectBuilder::new() - .property("id", ) - } -}*/ - #[derive(ToSchema, Deserialize, Serialize)] enum MyPrimitive { Int, @@ 
-294,33 +170,6 @@ struct MySchema { #[serde(flatten)] fields: MyStruct, } -/*impl TryFrom for iceberg::TableCreation { - type Error = MetastoreError; - - fn try_from(schema: IceBucketTableCreateRequest) -> MetastoreResult { - let mut properties = schema.properties.unwrap_or_default(); - let utc_now = Utc::now(); - let utc_now_str = utc_now.to_rfc3339(); - properties.insert("created_at".to_string(), utc_now_str.clone()); - properties.insert("updated_at".to_string(), utc_now_str); - - let table_schema = match schema.schema { - Either::Left(simple_schema) => { - Schema::try_from(simple_schema)? - } - Either::Right(schema) => schema, - }; - - Ok(Self { - name: schema.ident.table, - location: schema.location, - schema: table_schema, - partition_spec: schema.partition_spec.map(std::convert::Into::into), - sort_order: schema.write_order, - properties, - }) - } -}*/ #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Config { @@ -329,11 +178,11 @@ pub struct Config { } #[derive(Debug, Serialize, Deserialize, PartialEq)] -pub struct IceBucketTableUpdate { +pub struct TableUpdate { /// Commit will fail if the requirements are not met. pub requirements: Vec, /// The updates of the table. - pub updates: Vec, + pub updates: Vec, } pub struct TableRequirementExt(TableRequirement); diff --git a/crates/metastore/src/models/volumes.rs b/crates/metastore/src/models/volumes.rs index 5fa5da750..6033c4ea7 100644 --- a/crates/metastore/src/models/volumes.rs +++ b/crates/metastore/src/models/volumes.rs @@ -69,7 +69,7 @@ impl Validate for AwsCredentials { #[derive(Validate, Serialize, Deserialize, Debug, Clone, PartialEq, Eq, utoipa::ToSchema)] #[serde(rename_all = "kebab-case")] -pub struct IceBucketS3Volume { +pub struct S3Volume { #[validate(length(min = 1))] pub region: Option, #[validate(length(min = 1), custom(function = "validate_bucket_name"))] @@ -85,7 +85,7 @@ pub struct IceBucketS3Volume { #[derive(Validate, Serialize, Deserialize, Debug, Clone, PartialEq, Eq, utoipa::ToSchema)] #[serde(rename_all = "kebab-case")] -pub struct IceBucketS3TablesVolume { +pub struct S3TablesVolume { #[validate(length(min = 1))] pub region: String, #[validate(length(min = 1), custom(function = "validate_bucket_name"))] @@ -100,10 +100,10 @@ pub struct IceBucketS3TablesVolume { pub arn: String, } -impl IceBucketS3TablesVolume { +impl S3TablesVolume { #[must_use] pub fn s3_builder(&self) -> AmazonS3Builder { - let s3_volume = IceBucketS3Volume { + let s3_volume = S3Volume { region: Some(self.region.clone()), bucket: Some(self.name.clone()), endpoint: Some(self.endpoint.clone()), @@ -111,7 +111,7 @@ impl IceBucketS3TablesVolume { metadata_endpoint: None, credentials: Some(self.credentials.clone()), }; - IceBucketVolume::get_s3_builder(&s3_volume) + Volume::get_s3_builder(&s3_volume) } } @@ -138,21 +138,21 @@ fn validate_bucket_name(bucket_name: &str) -> Result<(), ValidationError> { #[derive(Validate, Serialize, Deserialize, Debug, Clone, PartialEq, Eq, utoipa::ToSchema)] #[serde(rename_all = "kebab-case")] -pub struct IceBucketFileVolume { +pub struct FileVolume { #[validate(length(min = 1))] pub path: String, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, utoipa::ToSchema)] #[serde(tag = "type", rename_all = "kebab-case")] -pub enum IceBucketVolumeType { - S3(IceBucketS3Volume), - S3Tables(IceBucketS3TablesVolume), - File(IceBucketFileVolume), +pub enum VolumeType { + S3(S3Volume), + S3Tables(S3TablesVolume), + File(FileVolume), Memory, } -impl Validate for IceBucketVolumeType 
{ +impl Validate for VolumeType { fn validate(&self) -> Result<(), ValidationErrors> { match self { Self::S3(volume) => volume.validate(), @@ -165,49 +165,49 @@ impl Validate for IceBucketVolumeType { #[derive(Validate, Serialize, Deserialize, Debug, Clone, PartialEq, Eq, utoipa::ToSchema)] #[serde(rename_all = "kebab-case")] -pub struct IceBucketVolume { - pub ident: IceBucketVolumeIdent, +pub struct Volume { + pub ident: VolumeIdent, #[serde(flatten)] #[validate(nested)] - pub volume: IceBucketVolumeType, + pub volume: VolumeType, } -pub type IceBucketVolumeIdent = String; +pub type VolumeIdent = String; #[allow(clippy::as_conversions)] -impl IceBucketVolume { +impl Volume { #[must_use] - pub const fn new(ident: IceBucketVolumeIdent, volume: IceBucketVolumeType) -> Self { + pub const fn new(ident: VolumeIdent, volume: VolumeType) -> Self { Self { ident, volume } } pub fn get_object_store(&self) -> MetastoreResult> { match &self.volume { - IceBucketVolumeType::S3(volume) => { + VolumeType::S3(volume) => { let s3_builder = Self::get_s3_builder(volume); s3_builder .build() .map(|s3| Arc::new(s3) as Arc) .context(metastore_error::ObjectStoreSnafu) } - IceBucketVolumeType::S3Tables(volume) => { + VolumeType::S3Tables(volume) => { let s3_builder = volume.s3_builder(); s3_builder .build() .map(|s3| Arc::new(s3) as Arc) .context(metastore_error::ObjectStoreSnafu) } - IceBucketVolumeType::File(_) => Ok(Arc::new( + VolumeType::File(_) => Ok(Arc::new( object_store::local::LocalFileSystem::new().with_automatic_cleanup(true), ) as Arc), - IceBucketVolumeType::Memory => { + VolumeType::Memory => { Ok(Arc::new(object_store::memory::InMemory::new()) as Arc) } } } #[must_use] - pub fn get_s3_builder(volume: &IceBucketS3Volume) -> AmazonS3Builder { + pub fn get_s3_builder(volume: &S3Volume) -> AmazonS3Builder { let mut s3_builder = AmazonS3Builder::new() .with_conditional_put(object_store::aws::S3ConditionalPut::ETagMatch); @@ -245,16 +245,16 @@ impl IceBucketVolume { #[must_use] pub fn prefix(&self) -> String { match &self.volume { - IceBucketVolumeType::S3(volume) => volume + VolumeType::S3(volume) => volume .bucket .as_ref() .map_or_else(|| "s3://".to_string(), |bucket| format!("s3://{bucket}")), - IceBucketVolumeType::S3Tables(volume) => volume + VolumeType::S3Tables(volume) => volume .bucket .as_ref() .map_or_else(|| "s3://".to_string(), |bucket| format!("s3://{bucket}")), - IceBucketVolumeType::File(volume) => format!("file://{}", volume.path), - IceBucketVolumeType::Memory => "memory://".to_string(), + VolumeType::File(volume) => format!("file://{}", volume.path), + VolumeType::Memory => "memory://".to_string(), } } diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__create_database.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__create_database.snap similarity index 87% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__create_database.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__create_database.snap index ad2d3fe69..9ced07b88 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__create_database.snap +++ b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__create_database.snap @@ -1,7 +1,6 @@ --- source: crates/metastore/src/metastore.rs expression: "(no_volume_result, all_databases, fetched_db, all_dbs_after)" -snapshot_kind: text --- ( Err( @@ -11,7 +10,7 @@ snapshot_kind: text ), [ RwObject { - data: IceBucketDatabase { + 
data: Database { ident: "testdb", properties: None, volume: "testv1", @@ -22,7 +21,7 @@ snapshot_kind: text ], Some( RwObject { - data: IceBucketDatabase { + data: Database { ident: "testdb", properties: None, volume: "testv2", diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__create_volumes.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__create_volumes.snap similarity index 89% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__create_volumes.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__create_volumes.snap index cb4531088..5ce6ee83e 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__create_volumes.snap +++ b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__create_volumes.snap @@ -1,7 +1,6 @@ --- source: crates/metastore/src/metastore.rs expression: "(test_volume, all_volumes)" -snapshot_kind: text --- ( Some( @@ -14,7 +13,7 @@ snapshot_kind: text ), [ RwObject { - data: IceBucketVolume { + data: Volume { ident: "test", volume: Memory, }, diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__delete_volume.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__delete_volume.snap similarity index 84% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__delete_volume.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__delete_volume.snap index ffafa7da0..efdd6567a 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__delete_volume.snap +++ b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__delete_volume.snap @@ -1,12 +1,11 @@ --- source: crates/metastore/src/metastore.rs expression: "(all_volumes, get_volume, all_volumes_after)" -snapshot_kind: text --- ( [ RwObject { - data: IceBucketVolume { + data: Volume { ident: "test", volume: Memory, }, @@ -16,7 +15,7 @@ snapshot_kind: text ], Some( RwObject { - data: IceBucketVolume { + data: Volume { ident: "test", volume: Memory, }, diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__duplicate_volume.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__duplicate_volume.snap similarity index 86% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__duplicate_volume.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__duplicate_volume.snap index 51f5e9966..4c4589525 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__duplicate_volume.snap +++ b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__duplicate_volume.snap @@ -1,7 +1,6 @@ --- source: crates/metastore/src/metastore.rs expression: result -snapshot_kind: text --- Err( VolumeAlreadyExists { diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__schemas.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__schemas.snap similarity index 79% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__schemas.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__schemas.snap index 17bf45a04..23aeba83e 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__schemas.snap +++ b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__schemas.snap @@ -1,7 +1,6 @@ --- 
source: crates/metastore/src/metastore.rs expression: "(no_db_result, schema_create, schema_list, schema_get, schema_list_after)" -snapshot_kind: text --- ( Err( @@ -10,8 +9,8 @@ snapshot_kind: text }, ), RwObject { - data: IceBucketSchema { - ident: IceBucketSchemaIdent { + data: Schema { + ident: SchemaIdent { schema: "testschema", database: "testdb", }, @@ -22,8 +21,8 @@ snapshot_kind: text }, [ RwObject { - data: IceBucketSchema { - ident: IceBucketSchemaIdent { + data: Schema { + ident: SchemaIdent { schema: "testschema", database: "testdb", }, @@ -35,8 +34,8 @@ snapshot_kind: text ], Some( RwObject { - data: IceBucketSchema { - ident: IceBucketSchemaIdent { + data: Schema { + ident: SchemaIdent { schema: "testschema", database: "testdb", }, diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__tables.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__tables.snap similarity index 97% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__tables.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__tables.snap index 98c04a3cb..3310d0cfc 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__tables.snap +++ b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__tables.snap @@ -1,7 +1,6 @@ --- source: crates/metastore/src/metastore.rs expression: "(no_schema_result, table_create, paths, table_list, table_get,\ntable_list_after)" -snapshot_kind: text --- ( Err( @@ -11,8 +10,8 @@ snapshot_kind: text }, ), RwObject { - data: IceBucketTable { - ident: IceBucketTableIdent { + data: Table { + ident: TableIdent { table: "testtable", schema: "testschema", database: "testdb", @@ -105,8 +104,8 @@ snapshot_kind: text ), [ RwObject { - data: IceBucketTable { - ident: IceBucketTableIdent { + data: Table { + ident: TableIdent { table: "testtable", schema: "testschema", database: "testdb", @@ -183,8 +182,8 @@ snapshot_kind: text ], Some( RwObject { - data: IceBucketTable { - ident: IceBucketTableIdent { + data: Table { + ident: TableIdent { table: "testtable", schema: "testschema", database: "testdb", diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__temporary_tables.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__temporary_tables.snap similarity index 96% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__temporary_tables.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__temporary_tables.snap index d6707388b..7277e0507 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__temporary_tables.snap +++ b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__temporary_tables.snap @@ -1,7 +1,6 @@ --- source: crates/metastore/src/metastore.rs expression: "(create_table.volume_ident.as_ref(), paths)" -snapshot_kind: text --- ( Some( diff --git a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__update_volume.snap b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__update_volume.snap similarity index 78% rename from crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__update_volume.snap rename to crates/metastore/src/snapshots/embucket_metastore__metastore__tests__update_volume.snap index b77c0a19f..ce2a926b2 100644 --- a/crates/metastore/src/snapshots/icebucket_metastore__metastore__tests__update_volume.snap +++ 
b/crates/metastore/src/snapshots/embucket_metastore__metastore__tests__update_volume.snap @@ -1,11 +1,10 @@ --- source: crates/metastore/src/metastore.rs expression: "(rwo1, rwo2)" -snapshot_kind: text --- ( RwObject { - data: IceBucketVolume { + data: Volume { ident: "test", volume: Memory, }, @@ -13,10 +12,10 @@ snapshot_kind: text updated_at: "TIMESTAMP", }, RwObject { - data: IceBucketVolume { + data: Volume { ident: "test", volume: File( - IceBucketFileVolume { + FileVolume { path: "/tmp", }, ), diff --git a/crates/metastore/src/snapshots/metastore__metastore__tests__create_database.snap b/crates/metastore/src/snapshots/metastore__metastore__tests__create_database.snap deleted file mode 100644 index 9230faa6d..000000000 --- a/crates/metastore/src/snapshots/metastore__metastore__tests__create_database.snap +++ /dev/null @@ -1,36 +0,0 @@ ---- -source: crates/metastore/src/metastore.rs -expression: "(no_volume_result, all_databases, fetched_db, all_dbs_after)" -snapshot_kind: text ---- -( - Err( - ObjectNotFound { - type_name: "volume", - name: "testv1", - }, - ), - [ - RwObject { - data: IceBucketDatabase { - name: "testdb", - properties: None, - volume: "testv1", - }, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - ], - Some( - RwObject { - data: IceBucketDatabase { - name: "testdb", - properties: None, - volume: "testv2", - }, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - ), - [], -) diff --git a/crates/metastore/src/snapshots/metastore__metastore__tests__create_volumes.snap b/crates/metastore/src/snapshots/metastore__metastore__tests__create_volumes.snap deleted file mode 100644 index af2d897d2..000000000 --- a/crates/metastore/src/snapshots/metastore__metastore__tests__create_volumes.snap +++ /dev/null @@ -1,24 +0,0 @@ ---- -source: crates/metastore/src/metastore.rs -expression: "(volumes_key, test_volume, all_volumes)" -snapshot_kind: text ---- -( - [ - "vol/test", - ], - Some( - Object { - "created_at: "TIMESTAMP", - "type": String("memory"), - "updated_at: "TIMESTAMP", - }, - ), - [ - RwObject { - data: Memory, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - ], -) diff --git a/crates/metastore/src/snapshots/metastore__metastore__tests__delete_volume.snap b/crates/metastore/src/snapshots/metastore__metastore__tests__delete_volume.snap deleted file mode 100644 index 11ad7d92b..000000000 --- a/crates/metastore/src/snapshots/metastore__metastore__tests__delete_volume.snap +++ /dev/null @@ -1,16 +0,0 @@ ---- -source: crates/metastore/src/metastore.rs -expression: "(all_volumes, all_volumes_after, get_volume)" -snapshot_kind: text ---- -( - [ - RwObject { - data: Memory, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - ], - [], - None, -) diff --git a/crates/metastore/src/snapshots/metastore__metastore__tests__duplicate_volume.snap b/crates/metastore/src/snapshots/metastore__metastore__tests__duplicate_volume.snap deleted file mode 100644 index 143602619..000000000 --- a/crates/metastore/src/snapshots/metastore__metastore__tests__duplicate_volume.snap +++ /dev/null @@ -1,11 +0,0 @@ ---- -source: crates/metastore/src/metastore.rs -expression: result -snapshot_kind: text ---- -Err( - ObjectAlreadyExists { - type_name: "volume", - name: "vol/test", - }, -) diff --git a/crates/metastore/src/snapshots/metastore__metastore__tests__schemas.snap b/crates/metastore/src/snapshots/metastore__metastore__tests__schemas.snap deleted file mode 100644 index d150df658..000000000 --- 
a/crates/metastore/src/snapshots/metastore__metastore__tests__schemas.snap +++ /dev/null @@ -1,51 +0,0 @@ ---- -source: crates/metastore/src/metastore.rs -expression: "(no_db_result, schema_create, schema_list, schema_get, schema_list_after)" -snapshot_kind: text ---- -( - Err( - ObjectNotFound { - type_name: "database", - name: "testdb", - }, - ), - RwObject { - data: IceBucketSchema { - ident: IceBucketSchemaIdent { - schema: "testschema", - database: "testdb", - }, - properties: None, - }, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - [ - RwObject { - data: IceBucketSchema { - ident: IceBucketSchemaIdent { - schema: "testschema", - database: "testdb", - }, - properties: None, - }, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - ], - Some( - RwObject { - data: IceBucketSchema { - ident: IceBucketSchemaIdent { - schema: "testschema", - database: "testdb", - }, - properties: None, - }, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - ), - [], -) diff --git a/crates/metastore/src/snapshots/metastore__metastore__tests__tables.snap b/crates/metastore/src/snapshots/metastore__metastore__tests__tables.snap deleted file mode 100644 index 9dba6d565..000000000 --- a/crates/metastore/src/snapshots/metastore__metastore__tests__tables.snap +++ /dev/null @@ -1,165 +0,0 @@ ---- -source: crates/metastore/src/metastore.rs -expression: (paths) -snapshot_kind: text ---- -Ok( - [ - Ok( - ObjectMeta { - location: Path { - raw: "manifest/00000000000000000001.manifest", - }, - last_modified: 2025-02-25T23:32:55.478965Z, - size: 80, - e_tag: Some( - "0", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "manifest/00000000000000000002.manifest", - }, - last_modified: 2025-02-25T23:32:55.479123Z, - size: 88, - e_tag: Some( - "1", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "manifest/00000000000000000003.manifest", - }, - last_modified: 2025-02-25T23:32:55.481513Z, - size: 96, - e_tag: Some( - "3", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000001.sst", - }, - last_modified: 2025-02-25T23:32:55.480324Z, - size: 68, - e_tag: Some( - "2", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000002.sst", - }, - last_modified: 2025-02-25T23:32:55.482291Z, - size: 280, - e_tag: Some( - "4", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000003.sst", - }, - last_modified: 2025-02-25T23:32:55.583601Z, - size: 186, - e_tag: Some( - "5", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000004.sst", - }, - last_modified: 2025-02-25T23:32:55.682588Z, - size: 297, - e_tag: Some( - "6", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000005.sst", - }, - last_modified: 2025-02-25T23:32:55.783109Z, - size: 191, - e_tag: Some( - "7", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000006.sst", - }, - last_modified: 2025-02-25T23:32:55.883644Z, - size: 373, - e_tag: Some( - "8", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000007.sst", - }, - last_modified: 2025-02-25T23:32:55.982867Z, - size: 220, - e_tag: Some( - "9", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000008.sst", - }, - last_modified: 2025-02-25T23:32:56.083674Z, - 
size: 1331, - e_tag: Some( - "10", - ), - version: None, - }, - ), - Ok( - ObjectMeta { - location: Path { - raw: "wal/00000000000000000009.sst", - }, - last_modified: 2025-02-25T23:32:56.183554Z, - size: 256, - e_tag: Some( - "11", - ), - version: None, - }, - ), - ], -) diff --git a/crates/metastore/src/snapshots/metastore__metastore__tests__update_volume.snap b/crates/metastore/src/snapshots/metastore__metastore__tests__update_volume.snap deleted file mode 100644 index ae1374a4a..000000000 --- a/crates/metastore/src/snapshots/metastore__metastore__tests__update_volume.snap +++ /dev/null @@ -1,21 +0,0 @@ ---- -source: crates/metastore/src/metastore.rs -expression: "(rwo1, rwo2)" -snapshot_kind: text ---- -( - RwObject { - data: Memory, - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, - RwObject { - data: File( - IceBucketFileVolume { - path: "/tmp", - }, - ), - created_at: "TIMESTAMP", - updated_at: "TIMESTAMP", - }, -) diff --git a/crates/runtime/Cargo.toml b/crates/runtime/Cargo.toml index 01efdce51..2afc38980 100644 --- a/crates/runtime/Cargo.toml +++ b/crates/runtime/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "icebucket_runtime" +name = "embucket_runtime" version = "0.1.0" edition = "2021" license-file = { workspace = true } @@ -46,9 +46,9 @@ iceberg-rest-catalog = { workspace = true } iceberg-rust = { workspace = true } iceberg-rust-spec = { workspace = true } iceberg-s3tables-catalog = { workspace = true } -icebucket_history = { workspace= true } -icebucket_metastore = { version = "0.1.0", path = "../metastore" } -icebucket_utils = { version = "0.1.0", path = "../utils" } +embucket_history = { workspace= true } +embucket_metastore = { version = "0.1.0", path = "../metastore" } +embucket_utils = { version = "0.1.0", path = "../utils" } indexmap = { version = "2.7.1" } itertools = "0.14.0" object_store = { workspace = true } diff --git a/crates/runtime/src/config.rs b/crates/runtime/src/config.rs index cebe5944f..a598a2433 100644 --- a/crates/runtime/src/config.rs +++ b/crates/runtime/src/config.rs @@ -17,15 +17,15 @@ use serde::{Deserialize, Serialize}; -use crate::http::config::IceBucketWebConfig; +use crate::http::config::WebConfig; #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct IceBucketRuntimeConfig { - pub web: IceBucketWebConfig, - pub db: IceBucketDbConfig, +pub struct RuntimeConfig { + pub web: WebConfig, + pub db: DbConfig, } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct IceBucketDbConfig { +pub struct DbConfig { pub slatedb_prefix: String, } diff --git a/crates/runtime/src/execution/catalogs/catalog.rs b/crates/runtime/src/execution/catalogs/catalog.rs index dd7720c78..7c8065322 100644 --- a/crates/runtime/src/execution/catalogs/catalog.rs +++ b/crates/runtime/src/execution/catalogs/catalog.rs @@ -18,19 +18,19 @@ use std::{any::Any, sync::Arc}; use crate::execution::catalogs::metastore::CatalogProviderCache; -use crate::execution::catalogs::schema::IceBucketDFSchema; +use crate::execution::catalogs::schema::DFSchema; use datafusion::catalog::{CatalogProvider, SchemaProvider}; +use embucket_metastore::Metastore; use iceberg_rust::catalog::Catalog as IcebergCatalog; -use icebucket_metastore::Metastore; -pub struct IceBucketDFCatalog { +pub struct DFCatalog { pub ident: String, pub metastore: Arc, pub mirror: Arc, pub iceberg_catalog: Arc, } -impl IceBucketDFCatalog { +impl DFCatalog { #[must_use] pub fn catalog(&self) -> Arc { self.iceberg_catalog.clone() @@ -38,15 +38,15 @@ impl IceBucketDFCatalog { } 
#[allow(clippy::missing_fields_in_debug)] -impl std::fmt::Debug for IceBucketDFCatalog { +impl std::fmt::Debug for DFCatalog { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("IceBucketDFCatalog") + f.debug_struct("DFCatalog") .field("ident", &self.ident) .finish() } } -impl CatalogProvider for IceBucketDFCatalog { +impl CatalogProvider for DFCatalog { fn as_any(&self) -> &dyn Any { self } @@ -63,7 +63,7 @@ impl CatalogProvider for IceBucketDFCatalog { fn schema(&self, name: &str) -> Option> { if let Some(db) = self.mirror.get(&self.ident) { if db.contains_key(name) { - let schema: Arc = Arc::new(IceBucketDFSchema { + let schema: Arc = Arc::new(DFSchema { database: self.ident.clone(), schema: name.to_string(), metastore: self.metastore.clone(), diff --git a/crates/runtime/src/execution/catalogs/iceberg_catalog.rs b/crates/runtime/src/execution/catalogs/iceberg_catalog.rs index 15968c52e..70b7c230c 100644 --- a/crates/runtime/src/execution/catalogs/iceberg_catalog.rs +++ b/crates/runtime/src/execution/catalogs/iceberg_catalog.rs @@ -18,6 +18,13 @@ use std::{collections::HashMap, sync::Arc}; use async_trait::async_trait; +use embucket_metastore::error::{MetastoreError, MetastoreResult}; +use embucket_metastore::{ + Metastore, Schema as MetastoreSchema, SchemaIdent as MetastoreSchemaIdent, + TableCreateRequest as MetastoreTableCreateRequest, TableIdent as MetastoreTableIdent, + TableUpdate as MetastoreTableUpdate, +}; +use embucket_utils::list_config::ListConfig; use futures::executor::block_on; use iceberg_rust::{ catalog::{ @@ -39,23 +46,17 @@ use iceberg_rust::{ use iceberg_rust_spec::{ identifier::FullIdentifier as IcebergFullIdentifier, namespace::Namespace as IcebergNamespace, }; -use icebucket_metastore::error::{MetastoreError, MetastoreResult}; -use icebucket_metastore::{ - IceBucketSchema, IceBucketSchemaIdent, IceBucketTableCreateRequest, IceBucketTableIdent, - IceBucketTableUpdate, Metastore, -}; -use icebucket_utils::list_config::ListConfig; use object_store::ObjectStore; use snafu::ResultExt; #[derive(Debug)] -pub struct IceBucketIcebergBridge { +pub struct IcebergBridge { pub metastore: Arc, pub database: String, pub object_store: Arc, } -impl IceBucketIcebergBridge { +impl IcebergBridge { pub fn new(metastore: Arc, database: String) -> MetastoreResult { let db = block_on(metastore.get_database(&database))?.ok_or( MetastoreError::DatabaseNotFound { @@ -75,8 +76,8 @@ impl IceBucketIcebergBridge { } #[must_use] - pub fn ident(&self, identifier: &IcebergIdentifier) -> IceBucketTableIdent { - IceBucketTableIdent { + pub fn ident(&self, identifier: &IcebergIdentifier) -> MetastoreTableIdent { + MetastoreTableIdent { database: self.database.to_string(), schema: identifier.namespace().to_string(), table: identifier.name().to_string(), @@ -85,7 +86,7 @@ impl IceBucketIcebergBridge { } #[async_trait] -impl IcebergCatalog for IceBucketIcebergBridge { +impl IcebergCatalog for IcebergBridge { /// Name of the catalog fn name(&self) -> &str { &self.database @@ -102,11 +103,11 @@ impl IcebergCatalog for IceBucketIcebergBridge { "Nested namespaces are not supported".to_string(), )); } - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: self.name().to_string(), schema: namespace.join(""), }; - let schema = IceBucketSchema { + let schema = MetastoreSchema { ident: schema_ident.clone(), properties: properties.clone(), }; @@ -125,7 +126,7 @@ impl IcebergCatalog for IceBucketIcebergBridge { "Nested namespaces are not 
supported".to_string(), )); } - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: self.name().to_string(), schema: namespace.join(""), }; @@ -146,7 +147,7 @@ impl IcebergCatalog for IceBucketIcebergBridge { "Nested namespaces are not supported".to_string(), )); } - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: self.name().to_string(), schema: namespace.join(""), }; @@ -176,7 +177,7 @@ impl IcebergCatalog for IceBucketIcebergBridge { "Nested namespaces are not supported".to_string(), )); } - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: self.name().to_string(), schema: namespace.join(""), }; @@ -218,7 +219,7 @@ impl IcebergCatalog for IceBucketIcebergBridge { "Nested namespaces are not supported".to_string(), )); } - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: self.name().to_string(), schema: namespace.join(""), }; @@ -240,7 +241,7 @@ impl IcebergCatalog for IceBucketIcebergBridge { "Nested namespaces are not supported".to_string(), )); } - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: self.name().to_string(), schema: namespace.join(""), }; @@ -357,7 +358,7 @@ impl IcebergCatalog for IceBucketIcebergBridge { create_table: IcebergCreateTable, ) -> Result { let ident = self.ident(&identifier); - let table_create_request = IceBucketTableCreateRequest { + let table_create_request = MetastoreTableCreateRequest { ident: ident.clone(), schema: create_table.schema, location: create_table.location, @@ -407,7 +408,7 @@ impl IcebergCatalog for IceBucketIcebergBridge { commit: IcebergCommitTable, ) -> Result { let table_ident = self.ident(&commit.identifier); - let table_update = IceBucketTableUpdate { + let table_update = MetastoreTableUpdate { requirements: commit.requirements, updates: commit.updates, }; diff --git a/crates/runtime/src/execution/catalogs/metastore.rs b/crates/runtime/src/execution/catalogs/metastore.rs index f1702ee15..5f9b57a3c 100644 --- a/crates/runtime/src/execution/catalogs/metastore.rs +++ b/crates/runtime/src/execution/catalogs/metastore.rs @@ -21,8 +21,8 @@ use std::{ sync::Arc, }; -use crate::execution::catalogs::catalog::IceBucketDFCatalog; -use crate::execution::catalogs::iceberg_catalog::IceBucketIcebergBridge; +use crate::execution::catalogs::catalog::DFCatalog; +use crate::execution::catalogs::iceberg_catalog::IcebergBridge; use crate::execution::error::{self as ex_error, ExecutionResult}; use dashmap::DashMap; use datafusion::{ @@ -33,31 +33,31 @@ use datafusion::{ }; use datafusion_common::DataFusionError; use datafusion_iceberg::DataFusionTable as IcebergDataFusionTable; +use embucket_metastore::{error::MetastoreError, Metastore}; +use embucket_utils::list_config::ListConfig; use iceberg_rust::{ catalog::Catalog as IcebergCatalog, spec::identifier::Identifier as IcebergIdentifier, }; -use icebucket_metastore::{error::MetastoreError, Metastore}; -use icebucket_utils::list_config::ListConfig; use object_store::local::LocalFileSystem; use object_store::ObjectStore; use snafu::ResultExt; use url::Url; -pub const DEFAULT_CATALOG: &str = "icebucket"; +pub const DEFAULT_CATALOG: &str = "embucket"; pub type TableProviderCache = DashMap>; pub type SchemaProviderCache = DashMap; pub type CatalogProviderCache = DashMap; #[derive(Clone)] -pub struct IceBucketDFMetastore { +pub struct DFMetastore { pub metastore: Arc, pub mirror: Arc, 
pub table_object_store: Arc>>, pub catalogs: DashMap>, } -impl IceBucketDFMetastore { +impl DFMetastore { pub fn new(metastore: Arc) -> Self { let table_object_store: DashMap> = DashMap::new(); table_object_store.insert("file://".to_string(), Arc::new(LocalFileSystem::new())); @@ -123,7 +123,7 @@ impl IceBucketDFMetastore { .insert(get_url_key(&url), table_object_store.clone()); let table_provider = match table.format { - icebucket_metastore::IceBucketTableFormat::Parquet => { + embucket_metastore::TableFormat::Parquet => { let parq_read_options = ParquetReadOptions::default(); let listing_options = parq_read_options.to_listing_options( ctx.state().config(), @@ -145,8 +145,8 @@ impl IceBucketDFMetastore { ListingTable::try_new(config).context(ex_error::DataFusionSnafu)?, ) as Arc } - icebucket_metastore::IceBucketTableFormat::Iceberg => { - let bridge = Arc::new(IceBucketIcebergBridge { + embucket_metastore::TableFormat::Iceberg => { + let bridge = Arc::new(IcebergBridge { metastore: self.metastore.clone(), database: table.ident.clone().database, object_store: table_object_store.clone(), @@ -198,9 +198,9 @@ impl IceBucketDFMetastore { } } -impl std::fmt::Debug for IceBucketDFMetastore { +impl std::fmt::Debug for DFMetastore { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("IceBucketDFMetastore").finish() + f.debug_struct("DFMetastore").finish() } } @@ -215,7 +215,7 @@ fn get_url_key(url: &Url) -> String { ) } -impl ObjectStoreRegistry for IceBucketDFMetastore { +impl ObjectStoreRegistry for DFMetastore { fn register_store( &self, url: &Url, @@ -238,7 +238,7 @@ impl ObjectStoreRegistry for IceBucketDFMetastore { } // Explore using AsyncCatalogProviderList alongside CatalogProviderList -impl CatalogProviderList for IceBucketDFMetastore { +impl CatalogProviderList for DFMetastore { fn as_any(&self) -> &dyn Any { self } @@ -268,10 +268,10 @@ impl CatalogProviderList for IceBucketDFMetastore { if !self.mirror.contains_key(name) { return None; } - let iceberg_catalog = IceBucketIcebergBridge::new(self.metastore.clone(), name.to_string()) + let iceberg_catalog = IcebergBridge::new(self.metastore.clone(), name.to_string()) .ok() .map(Arc::new)?; - let catalog: Arc = Arc::new(IceBucketDFCatalog { + let catalog: Arc = Arc::new(DFCatalog { ident: name.to_string(), metastore: self.metastore.clone(), mirror: self.mirror.clone(), diff --git a/crates/runtime/src/execution/catalogs/schema.rs b/crates/runtime/src/execution/catalogs/schema.rs index 1cb09c4e2..742aa2ae0 100644 --- a/crates/runtime/src/execution/catalogs/schema.rs +++ b/crates/runtime/src/execution/catalogs/schema.rs @@ -19,11 +19,11 @@ use crate::execution::catalogs::metastore::CatalogProviderCache; use async_trait::async_trait; use datafusion::catalog::{SchemaProvider, TableProvider}; use datafusion_common::{exec_err, DataFusionError, Result as DFResult}; -use icebucket_metastore::Metastore; +use embucket_metastore::Metastore; use std::any::Any; use std::sync::Arc; -pub struct IceBucketDFSchema { +pub struct DFSchema { pub database: String, pub schema: String, pub metastore: Arc, @@ -31,9 +31,9 @@ pub struct IceBucketDFSchema { } #[allow(clippy::missing_fields_in_debug)] -impl std::fmt::Debug for IceBucketDFSchema { +impl std::fmt::Debug for DFSchema { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("IceBucketDFSchema") + f.debug_struct("DFSchema") .field("database", &self.database) .field("schema", &self.schema) .field("metastore", &"") @@ -42,7 +42,7 @@ impl 
std::fmt::Debug for IceBucketDFSchema { } #[async_trait] -impl SchemaProvider for IceBucketDFSchema { +impl SchemaProvider for DFSchema { /// Returns the owner of the Schema, default is None. This value is reported /// as part of `information_tables.schemata fn owner_name(&self) -> Option<&str> { diff --git a/crates/runtime/src/execution/datafusion/error.rs b/crates/runtime/src/execution/datafusion/error.rs index f1d70dd56..f269bb831 100644 --- a/crates/runtime/src/execution/datafusion/error.rs +++ b/crates/runtime/src/execution/datafusion/error.rs @@ -21,7 +21,7 @@ use snafu::prelude::*; #[derive(Snafu, Debug)] #[snafu(visibility(pub(crate)))] -pub enum IceBucketSQLError { +pub enum SQLError { #[snafu(display("Arrow error: {source}"))] Arrow { source: arrow::error::ArrowError }, @@ -64,4 +64,4 @@ pub enum IceBucketSQLError { NotImplemented { message: String }, } -pub type IceBucketSQLResult = std::result::Result; +pub type SQLResult = std::result::Result; diff --git a/crates/runtime/src/execution/datafusion/mod.rs b/crates/runtime/src/execution/datafusion/mod.rs index 973ad247e..aa37abb88 100644 --- a/crates/runtime/src/execution/datafusion/mod.rs +++ b/crates/runtime/src/execution/datafusion/mod.rs @@ -18,5 +18,6 @@ //pub mod analyzer; //pub mod error; pub mod context_provider; +pub mod error; pub mod functions; pub mod type_planner; diff --git a/crates/runtime/src/execution/datafusion/type_planner.rs b/crates/runtime/src/execution/datafusion/type_planner.rs index 73622e5ff..00ffcd8e6 100644 --- a/crates/runtime/src/execution/datafusion/type_planner.rs +++ b/crates/runtime/src/execution/datafusion/type_planner.rs @@ -24,9 +24,9 @@ use datafusion::sql::utils::make_decimal_type; use datafusion_common::{not_impl_err, DataFusionError}; #[derive(Debug)] -pub struct IceBucketTypePlanner {} +pub struct CustomTypePlanner {} -impl TypePlanner for IceBucketTypePlanner { +impl TypePlanner for CustomTypePlanner { fn plan_type(&self, sql_type: &ast::DataType) -> Result> { match sql_type { SQLDataType::Int32 => Ok(Some(DataType::Int32)), diff --git a/crates/runtime/src/execution/error.rs b/crates/runtime/src/execution/error.rs index 5ecdc9ec8..e779db629 100644 --- a/crates/runtime/src/execution/error.rs +++ b/crates/runtime/src/execution/error.rs @@ -63,7 +63,7 @@ pub enum ExecutionError { #[snafu(display("Metastore error: {source}"))] Metastore { - source: icebucket_metastore::error::MetastoreError, + source: embucket_metastore::error::MetastoreError, }, #[snafu(display("Database {db} not found"))] diff --git a/crates/runtime/src/execution/query.rs b/crates/runtime/src/execution/query.rs index 4d635ded3..6901870fe 100644 --- a/crates/runtime/src/execution/query.rs +++ b/crates/runtime/src/execution/query.rs @@ -37,16 +37,17 @@ use datafusion_expr::logical_plan::dml::WriteOp; use datafusion_expr::CreateMemoryTable; use datafusion_expr::DdlStatement; use datafusion_iceberg::catalog::catalog::IcebergCatalog; +use embucket_metastore::{ + Metastore, SchemaIdent as MetastoreSchemaIdent, + TableCreateRequest as MetastoreTableCreateRequest, TableFormat as MetastoreTableFormat, + TableIdent as MetastoreTableIdent, +}; use iceberg_rust::catalog::create::CreateTableBuilder; use iceberg_rust::catalog::Catalog; use iceberg_rust::spec::arrow::schema::new_fields_with_ids; +use iceberg_rust::spec::namespace::Namespace; use iceberg_rust::spec::schema::Schema; use iceberg_rust::spec::types::StructType; -use iceberg_rust_spec::namespace::Namespace; -use icebucket_metastore::{ - IceBucketSchemaIdent, 
IceBucketTableCreateRequest, IceBucketTableFormat, IceBucketTableIdent, - Metastore, -}; use object_store::aws::AmazonS3Builder; use serde::{Deserialize, Serialize}; use snafu::ResultExt; @@ -62,30 +63,25 @@ use std::ops::ControlFlow; use std::sync::Arc; use url::Url; -use super::catalogs::{catalog::IceBucketDFCatalog, metastore::IceBucketDFMetastore}; +use super::catalogs::{catalog::DFCatalog, metastore::DFMetastore}; use super::datafusion::context_provider::ExtendedSqlToRel; use super::datafusion::functions::visit_functions_expressions; use super::error::{self as ex_error, ExecutionError, ExecutionResult}; use super::utils::NormalizedIdent; -use super::session::IceBucketUserSession; +use super::session::UserSession; #[derive(Default, Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct IceBucketQueryContext { +pub struct QueryContext { pub database: Option, pub schema: Option, } -pub enum IceBucketQueryState { - Raw(String), - Preprocessed(String), -} - -pub struct IceBucketQuery { +pub struct UserQuery { pub metastore: Arc, pub query: String, - pub session: Arc, - pub query_context: IceBucketQueryContext, + pub session: Arc, + pub query_context: QueryContext, } pub enum IcebergCatalogResult { @@ -93,12 +89,8 @@ pub enum IcebergCatalogResult { Result(ExecutionResult>), } -impl IceBucketQuery { - pub(super) fn new( - session: Arc, - query: S, - query_context: IceBucketQueryContext, - ) -> Self +impl UserQuery { + pub(super) fn new(session: Arc, query: S, query_context: QueryContext) -> Self where S: Into, { @@ -129,7 +121,7 @@ impl IceBucketQuery { .database .clone() .or_else(|| self.session.get_session_variable("database")) - .or_else(|| Some("icebucket".to_string())) + .or_else(|| Some("embucket".to_string())) } fn current_schema(&self) -> Option { @@ -147,7 +139,7 @@ impl IceBucketQuery { .state() .catalog_list() .as_any() - .downcast_ref::() + .downcast_ref::() { catalog_list_impl.refresh(&self.session.ctx).await } else { @@ -352,10 +344,8 @@ impl IceBucketQuery { ) -> IcebergCatalogResult { if let Some(iceberg_catalog) = catalog.as_any().downcast_ref::() { IcebergCatalogResult::Catalog(iceberg_catalog.catalog()) - } else if let Some(icebucket_catalog) = - catalog.as_any().downcast_ref::() - { - IcebergCatalogResult::Catalog(icebucket_catalog.catalog()) + } else if let Some(embucket_catalog) = catalog.as_any().downcast_ref::() { + IcebergCatalogResult::Catalog(embucket_catalog.catalog()) } else if catalog .as_any() .downcast_ref::() @@ -379,7 +369,7 @@ impl IceBucketQuery { ), }); }; - let ident: IceBucketTableIdent = self.resolve_table_ident(names[0].0.clone())?.into(); + let ident: MetastoreTableIdent = self.resolve_table_ident(names[0].0.clone())?.into(); let plan = self.sql_statement_to_plan(statement).await?; let catalog = self.get_catalog(ident.database.as_str())?; let iceberg_catalog = match self @@ -433,7 +423,7 @@ impl IceBucketQuery { // Check if it already exists, if it is - drop it // For now we behave as CREATE OR REPLACE // TODO support CREATE without REPLACE - let ident: IceBucketTableIdent = new_table_ident.into(); + let ident: MetastoreTableIdent = new_table_ident.into(); let catalog = self.get_catalog(ident.database.as_str())?; let schema_provider = @@ -489,7 +479,7 @@ impl IceBucketQuery { &self, catalog: Arc, table_location: Option, - ident: IceBucketTableIdent, + ident: MetastoreTableIdent, plan: LogicalPlan, ) -> ExecutionResult<()> { let iceberg_catalog = match self @@ -534,7 +524,7 @@ impl IceBucketQuery { statement: CreateExternalTable, ) -> 
ExecutionResult> { let table_location = statement.location.clone(); - let table_format = IceBucketTableFormat::from(statement.file_type); + let table_format = MetastoreTableFormat::from(statement.file_type); let session_context = HashMap::new(); let session_context_planner = SessionContextProvider { state: &self.session.ctx.state(), @@ -554,9 +544,9 @@ impl IceBucketQuery { // TODO: Use the options with the table format in the future let _table_options = statement.options.clone(); - let table_ident: IceBucketTableIdent = self.resolve_table_ident(statement.name.0)?.into(); + let table_ident: MetastoreTableIdent = self.resolve_table_ident(statement.name.0)?.into(); - let table_create_request = IceBucketTableCreateRequest { + let table_create_request = MetastoreTableCreateRequest { ident: table_ident.clone(), schema: Schema::builder() .with_schema_id(0) @@ -830,7 +820,7 @@ impl IceBucketQuery { }); }; - let ident: IceBucketSchemaIdent = self.resolve_schema_ident(schema_name.0)?.into(); + let ident: MetastoreSchemaIdent = self.resolve_schema_ident(schema_name.0)?.into(); let catalog = self.get_catalog(ident.database.as_str())?; if catalog.schema(ident.schema.as_str()).is_some() && if_not_exists { return Err(ExecutionError::ObjectAlreadyExists { diff --git a/crates/runtime/src/execution/service.rs b/crates/runtime/src/execution/service.rs index c452ceb0a..c3c37dce6 100644 --- a/crates/runtime/src/execution/service.rs +++ b/crates/runtime/src/execution/service.rs @@ -30,11 +30,11 @@ use snafu::ResultExt; use super::{ models::ColumnInfo, - query::IceBucketQueryContext, - session::IceBucketUserSession, + query::QueryContext, + session::UserSession, utils::{convert_record_batches, Config}, }; -use icebucket_metastore::{IceBucketTableIdent, Metastore}; +use embucket_metastore::{Metastore, TableIdent as MetastoreTableIdent}; use tokio::sync::RwLock; use uuid::Uuid; @@ -42,7 +42,7 @@ use super::error::{self as ex_error, ExecutionError, ExecutionResult}; pub struct ExecutionService { metastore: Arc, - df_sessions: Arc>>>, + df_sessions: Arc>>>, config: Config, } @@ -59,7 +59,7 @@ impl ExecutionService { pub async fn create_session(&self, session_id: String) -> ExecutionResult<()> { let session_exists = { self.df_sessions.read().await.contains_key(&session_id) }; if !session_exists { - let user_session = IceBucketUserSession::new(self.metastore.clone()).await?; + let user_session = UserSession::new(self.metastore.clone()).await?; tracing::trace!("Acuiring write lock for df_sessions"); let mut session_list_mut = self.df_sessions.write().await; tracing::trace!("Acquired write lock for df_sessions"); @@ -82,7 +82,7 @@ impl ExecutionService { &self, session_id: &str, query: &str, - query_context: IceBucketQueryContext, + query_context: QueryContext, ) -> ExecutionResult<(Vec, Vec)> { let sessions = self.df_sessions.read().await; let user_session = @@ -128,7 +128,7 @@ impl ExecutionService { pub async fn upload_data_to_table( &self, session_id: &str, - table_ident: &IceBucketTableIdent, + table_ident: &MetastoreTableIdent, data: Bytes, file_name: &str, format: Format, @@ -219,7 +219,7 @@ impl ExecutionService { format!("CREATE TABLE {table_ident} AS SELECT * FROM {table}") }; - let query = user_session.query(&query, IceBucketQueryContext::default()); + let query = user_session.query(&query, QueryContext::default()); Box::pin(query.execute()).await?; user_session diff --git a/crates/runtime/src/execution/session.rs b/crates/runtime/src/execution/session.rs index eebaffc22..b4f1878a3 100644 --- 
a/crates/runtime/src/execution/session.rs +++ b/crates/runtime/src/execution/session.rs @@ -15,13 +15,13 @@ // specific language governing permissions and limitations // under the License. -use super::catalogs::metastore::{IceBucketDFMetastore, DEFAULT_CATALOG}; +use super::catalogs::metastore::{DFMetastore, DEFAULT_CATALOG}; use super::datafusion::functions::geospatial::register_udfs as register_geo_udfs; use super::datafusion::functions::register_udfs; -use super::datafusion::type_planner::IceBucketTypePlanner; +use super::datafusion::type_planner::CustomTypePlanner; use super::dedicated_executor::DedicatedExecutor; use super::error::{self as ex_error, ExecutionResult}; -use super::query::{IceBucketQuery, IceBucketQueryContext}; +use super::query::{QueryContext, UserQuery}; use aws_config::{BehaviorVersion, Region, SdkConfig}; use aws_credential_types::provider::SharedCredentialsProvider; use aws_credential_types::Credentials; @@ -35,30 +35,30 @@ use datafusion::sql::planner::IdentNormalizer; use datafusion_common::config::{ConfigEntry, ConfigExtension, ExtensionOptions}; use datafusion_iceberg::catalog::catalog::IcebergCatalog as DataFusionIcebergCatalog; use datafusion_iceberg::planner::IcebergQueryPlanner; +use embucket_metastore::{AwsCredentials, Metastore, VolumeType as MetastoreVolumeType}; +use embucket_utils::list_config::ListConfig; use geodatafusion::udf::native::register_native as register_geo_native; use iceberg_rust::object_store::ObjectStoreBuilder; use iceberg_s3tables_catalog::S3TablesCatalog; -use icebucket_metastore::{AwsCredentials, IceBucketVolumeType, Metastore}; -use icebucket_utils::list_config::ListConfig; use snafu::ResultExt; use std::any::Any; use std::collections::HashMap; use std::env; use std::sync::Arc; -pub struct IceBucketUserSession { +pub struct UserSession { pub metastore: Arc, pub ctx: SessionContext, pub ident_normalizer: IdentNormalizer, pub executor: DedicatedExecutor, } -impl IceBucketUserSession { +impl UserSession { pub async fn new(metastore: Arc) -> ExecutionResult { let sql_parser_dialect = env::var("SQL_PARSER_DIALECT").unwrap_or_else(|_| "snowflake".to_string()); - let catalog_list_impl = Arc::new(IceBucketDFMetastore::new(metastore.clone())); + let catalog_list_impl = Arc::new(DFMetastore::new(metastore.clone())); let runtime_config = RuntimeEnvBuilder::new() .with_object_store_registry(catalog_list_impl.clone()) @@ -68,7 +68,7 @@ impl IceBucketUserSession { let state = SessionStateBuilder::new() .with_config( SessionConfig::new() - .with_option_extension(IceBucketSessionParams::default()) + .with_option_extension(SessionParams::default()) .with_information_schema(true) // Cannot create catalog (database) automatic since it requires default volume .with_create_default_catalog_and_schema(false) @@ -79,7 +79,7 @@ impl IceBucketUserSession { .with_runtime_env(Arc::new(runtime_config)) .with_catalog_list(catalog_list_impl.clone()) .with_query_planner(Arc::new(IcebergQueryPlanner {})) - .with_type_planner(Arc::new(IceBucketTypePlanner {})) + .with_type_planner(Arc::new(CustomTypePlanner {})) .build(); let mut ctx = SessionContext::new_with_state(state); register_udfs(&mut ctx).context(ex_error::RegisterUDFSnafu)?; @@ -109,7 +109,7 @@ impl IceBucketUserSession { .context(ex_error::MetastoreSnafu)? 
.into_iter() .filter_map(|volume| { - if let IceBucketVolumeType::S3Tables(s3_volume) = volume.volume.clone() { + if let MetastoreVolumeType::S3Tables(s3_volume) = volume.volume.clone() { Some(s3_volume) } else { None @@ -153,15 +153,11 @@ impl IceBucketUserSession { Ok(()) } - pub fn query( - self: &Arc, - query: S, - query_context: IceBucketQueryContext, - ) -> IceBucketQuery + pub fn query(self: &Arc, query: S, query_context: QueryContext) -> UserQuery where S: Into, { - IceBucketQuery::new(self.clone(), query.into(), query_context) + UserQuery::new(self.clone(), query.into(), query_context) } pub fn set_session_variable( @@ -175,7 +171,7 @@ impl IceBucketUserSession { .config_mut() .options_mut() .extensions - .get_mut::(); + .get_mut::(); if let Some(cfg) = config { if set { cfg.set_properties(params) @@ -191,11 +187,7 @@ impl IceBucketUserSession { #[must_use] pub fn get_session_variable(&self, variable: &str) -> Option { let state = self.ctx.state(); - let config = state - .config() - .options() - .extensions - .get::(); + let config = state.config().options().extensions.get::(); if let Some(cfg) = config { return cfg.properties.get(variable).cloned(); } @@ -204,11 +196,11 @@ impl IceBucketUserSession { } #[derive(Default, Debug, Clone)] -pub struct IceBucketSessionParams { +pub struct SessionParams { pub properties: HashMap, } -impl IceBucketSessionParams { +impl SessionParams { pub fn set_properties(&mut self, properties: HashMap) -> DFResult<()> { for (key, value) in properties { self.properties @@ -225,11 +217,11 @@ impl IceBucketSessionParams { } } -impl ConfigExtension for IceBucketSessionParams { +impl ConfigExtension for SessionParams { const PREFIX: &'static str = "session_params"; } -impl ExtensionOptions for IceBucketSessionParams { +impl ExtensionOptions for SessionParams { fn as_any(&self) -> &dyn Any { self } diff --git a/crates/runtime/src/execution/tests/query.rs b/crates/runtime/src/execution/tests/query.rs index 88b3a2956..988fe840c 100644 --- a/crates/runtime/src/execution/tests/query.rs +++ b/crates/runtime/src/execution/tests/query.rs @@ -15,8 +15,8 @@ // specific language governing permissions and limitations // under the License. 
-use crate::execution::query::{IceBucketQuery, IceBucketQueryContext}; -use crate::execution::session::IceBucketUserSession; +use crate::execution::query::{QueryContext, UserQuery}; +use crate::execution::session::UserSession; use crate::execution::service::ExecutionService; use crate::execution::utils::{Config, DataSerializationFormat}; @@ -26,9 +26,10 @@ use datafusion::sql::parser::{DFParser, Statement as DFStatement}; use datafusion::sql::sqlparser::ast::visit_expressions; use datafusion::sql::sqlparser::ast::Statement as SQLStatement; use datafusion::sql::sqlparser::ast::{Expr, ObjectName}; -use icebucket_metastore::Metastore; -use icebucket_metastore::{ - IceBucketDatabase, IceBucketSchema, IceBucketSchemaIdent, IceBucketTableIdent, IceBucketVolume, +use embucket_metastore::Metastore; +use embucket_metastore::{ + Database as MetastoreDatabase, Schema as MetastoreSchema, SchemaIdent as MetastoreSchemaIdent, + TableIdent as MetastoreTableIdent, Volume as MetastoreVolume, }; use sqlparser::ast::{ Function, FunctionArg, FunctionArgExpr, FunctionArgumentList, FunctionArguments, @@ -61,11 +62,11 @@ impl<'a, T> Test<'a, T> { async fn test_timestamp_keywords_postprocess() { let metastore = SlateDBMetastore::new_in_memory().await; let session = Arc::new( - IceBucketUserSession::new(metastore) + UserSession::new(metastore) .await .expect("Failed to create user session"), ); - let query_context = IceBucketQueryContext::default(); + let query_context = QueryContext::default(); let test = vec![ Test::new( "SELECT dateadd(year, 5, '2025-06-01')", @@ -121,7 +122,7 @@ async fn test_timestamp_keywords_postprocess() { for test in test.iter() { let query = session.query(test.input, query_context.clone()); let mut statement = query.parse_query().unwrap(); - IceBucketQuery::postprocess_query_statement(&mut statement); + UserQuery::postprocess_query_statement(&mut statement); if let DFStatement::Statement(statement) = statement { visit_expressions(&statement, |expr| { if let Expr::Function(Function { @@ -177,7 +178,7 @@ fn test_postprocess_query_statement_functions_expressions() { for (init, exp) in args { let statement = DFParser::parse_sql(init).unwrap().pop_front(); if let Some(mut s) = statement { - IceBucketQuery::postprocess_query_statement(&mut s); + UserQuery::postprocess_query_statement(&mut s); assert_eq!(s.to_string(), exp); } } @@ -188,11 +189,11 @@ fn test_postprocess_query_statement_functions_expressions() { async fn test_context_name_injection() { let metastore = SlateDBMetastore::new_in_memory().await; let session = Arc::new( - IceBucketUserSession::new(metastore) + UserSession::new(metastore) .await .expect("Failed to create user session"), ); - let query1 = session.query("SELECT * FROM table1", IceBucketQueryContext::default()); + let query1 = session.query("SELECT * FROM table1", QueryContext::default()); let query_statement = if let DFStatement::Statement(statement) = query1.parse_query().expect("Failed to parse query") { @@ -213,7 +214,7 @@ async fn test_context_name_injection() { let query2 = session.query( "SELECT * from table2", - IceBucketQueryContext { + QueryContext { database: Some("db2".to_string()), schema: Some("sch2".to_string()), }, @@ -245,7 +246,7 @@ async fn test_context_name_injection() { .collect(), ) .expect("Failed to set session variable"); - let query3 = session.query("SELECT * from table3", IceBucketQueryContext::default()); + let query3 = session.query("SELECT * from table3", QueryContext::default()); let query_statement3 = if let 
DFStatement::Statement(statement) = query3.parse_query().expect("Failed to parse query") { @@ -278,7 +279,7 @@ async fn test_context_name_injection() { .expect("Failed to set session variable"); let query4 = session.query( "SELECT * from table4 INNER JOIN table4_1 ON 1=1", - IceBucketQueryContext::default(), + QueryContext::default(), ); let query_statement4 = if let DFStatement::Statement(statement) = query4.parse_query().expect("Failed to parse query") @@ -303,15 +304,15 @@ async fn test_context_name_injection() { #[tokio::test] async fn test_create_table_with_timestamp_nanosecond() { let (execution_svc, _, session_id) = prepare_env().await; - let table_ident = IceBucketTableIdent { - database: "icebucket".to_string(), + let table_ident = MetastoreTableIdent { + database: "embucket".to_string(), schema: "public".to_string(), table: "target_table".to_string(), }; // Verify that the file was uploaded successfully by running select * from the table - let query = format!("CREATE TABLE {table_ident} (id INT, ts TIMESTAMP_NTZ(9)) as VALUES (1, '2025-04-09T21:11:23'), (2, '2025-04-09T21:11:00');"); + let query = format!("CREATE TABLE {}.{}.{} (id INT, ts TIMESTAMP_NTZ(9)) as VALUES (1, '2025-04-09T21:11:23'), (2, '2025-04-09T21:11:00');", table_ident.database, table_ident.schema, table_ident.table); let (rows, _) = execution_svc - .query(&session_id, &query, IceBucketQueryContext::default()) + .query(&session_id, &query, QueryContext::default()) .await .expect("Failed to execute query"); @@ -330,15 +331,15 @@ async fn test_create_table_with_timestamp_nanosecond() { #[tokio::test] async fn test_drop_table() { let (execution_svc, _, session_id) = prepare_env().await; - let table_ident = IceBucketTableIdent { - database: "icebucket".to_string(), + let table_ident = MetastoreTableIdent { + database: "embucket".to_string(), schema: "public".to_string(), table: "target_table".to_string(), }; // Verify that the file was uploaded successfully by running select * from the table let query = format!("CREATE TABLE {table_ident} (id INT) as VALUES (1), (2);"); let (rows, _) = execution_svc - .query(&session_id, &query, IceBucketQueryContext::default()) + .query(&session_id, &query, QueryContext::default()) .await .expect("Failed to execute query"); @@ -355,14 +356,14 @@ async fn test_drop_table() { let query = format!("DROP TABLE {table_ident};"); execution_svc - .query(&session_id, &query, IceBucketQueryContext::default()) + .query(&session_id, &query, QueryContext::default()) .await .expect("Failed to execute query"); // Verify that the table is not exists let query = format!("SELECT * FROM {table_ident};"); let res = execution_svc - .query(&session_id, &query, IceBucketQueryContext::default()) + .query(&session_id, &query, QueryContext::default()) .await; assert!(res.is_err()); @@ -377,13 +378,13 @@ async fn test_drop_table() { #[tokio::test] async fn test_create_schema() { let (execution_svc, metastore, session_id) = prepare_env().await; - let schema_ident = IceBucketSchemaIdent { - database: "icebucket".to_string(), + let schema_ident = MetastoreSchemaIdent { + database: "embucket".to_string(), schema: "public_new".to_string(), }; let query = format!("CREATE SCHEMA {schema_ident};"); execution_svc - .query(&session_id, &query, IceBucketQueryContext::default()) + .query(&session_id, &query, QueryContext::default()) .await .expect("Failed to execute query"); // TODO use "SHOW SCHEMAS" sql @@ -398,32 +399,32 @@ async fn prepare_env() -> (ExecutionService, Arc, String) { metastore .create_volume( 
&"test_volume".to_string(), - IceBucketVolume::new( + MetastoreVolume::new( "test_volume".to_string(), - icebucket_metastore::IceBucketVolumeType::Memory, + embucket_metastore::VolumeType::Memory, ), ) .await .expect("Failed to create volume"); metastore .create_database( - &"icebucket".to_string(), - IceBucketDatabase { - ident: "icebucket".to_string(), + &"embucket".to_string(), + MetastoreDatabase { + ident: "embucket".to_string(), properties: None, volume: "test_volume".to_string(), }, ) .await .expect("Failed to create database"); - let schema_ident = IceBucketSchemaIdent { - database: "icebucket".to_string(), + let schema_ident = MetastoreSchemaIdent { + database: "embucket".to_string(), schema: "public".to_string(), }; metastore .create_schema( &schema_ident.clone(), - IceBucketSchema { + MetastoreSchema { ident: schema_ident, properties: None, }, diff --git a/crates/runtime/src/execution/tests/service.rs b/crates/runtime/src/execution/tests/service.rs index 574c2e892..7f58c65a5 100644 --- a/crates/runtime/src/execution/tests/service.rs +++ b/crates/runtime/src/execution/tests/service.rs @@ -15,15 +15,16 @@ // specific language governing permissions and limitations // under the License. -use crate::execution::query::IceBucketQueryContext; +use crate::execution::query::QueryContext; use crate::execution::service::ExecutionService; use crate::execution::utils::{Config, DataSerializationFormat}; use crate::SlateDBMetastore; use datafusion::{arrow::csv::reader::Format, assert_batches_eq}; -use icebucket_metastore::models::table::IceBucketTableIdent; -use icebucket_metastore::Metastore; -use icebucket_metastore::{ - IceBucketDatabase, IceBucketSchema, IceBucketSchemaIdent, IceBucketVolume, +use embucket_metastore::models::table::TableIdent as MetastoreTableIdent; +use embucket_metastore::Metastore; +use embucket_metastore::{ + Database as MetastoreDatabase, Schema as MetastoreSchema, SchemaIdent as MetastoreSchemaIdent, + Volume as MetastoreVolume, }; #[tokio::test] @@ -46,7 +47,7 @@ async fn test_execute_always_returns_schema() { .query( "test_session_id", "SELECT 1 AS a, 2.0 AS b, '3' AS c WHERE False", - IceBucketQueryContext::default(), + QueryContext::default(), ) .await .expect("Failed to execute query"); @@ -64,32 +65,32 @@ async fn test_service_upload_file() { metastore .create_volume( &"test_volume".to_string(), - IceBucketVolume::new( + MetastoreVolume::new( "test_volume".to_string(), - icebucket_metastore::IceBucketVolumeType::Memory, + embucket_metastore::VolumeType::Memory, ), ) .await .expect("Failed to create volume"); metastore .create_database( - &"icebucket".to_string(), - IceBucketDatabase { - ident: "icebucket".to_string(), + &"embucket".to_string(), + MetastoreDatabase { + ident: "embucket".to_string(), properties: None, volume: "test_volume".to_string(), }, ) .await .expect("Failed to create database"); - let schema_ident = IceBucketSchemaIdent { - database: "icebucket".to_string(), + let schema_ident = MetastoreSchemaIdent { + database: "embucket".to_string(), schema: "public".to_string(), }; metastore .create_schema( &schema_ident.clone(), - IceBucketSchema { + MetastoreSchema { ident: schema_ident, properties: None, }, @@ -98,8 +99,8 @@ async fn test_service_upload_file() { .expect("Failed to create schema"); let file_name = "test.csv"; - let table_ident = IceBucketTableIdent { - database: "icebucket".to_string(), + let table_ident = MetastoreTableIdent { + database: "embucket".to_string(), schema: "public".to_string(), table: "target_table".to_string(), }; @@ 
-137,7 +138,7 @@ async fn test_service_upload_file() { // Verify that the file was uploaded successfully by running select * from the table let query = format!("SELECT * FROM {}", table_ident.table); let (rows, _) = execution_svc - .query(session_id, &query, IceBucketQueryContext::default()) + .query(session_id, &query, QueryContext::default()) .await .expect("Failed to execute query"); @@ -163,7 +164,7 @@ async fn test_service_upload_file() { // Verify that the file was uploaded successfully by running select * from the table let query = format!("SELECT * FROM {}", table_ident.table); let (rows, _) = execution_svc - .query(session_id, &query, IceBucketQueryContext::default()) + .query(session_id, &query, QueryContext::default()) .await .expect("Failed to execute query"); @@ -189,42 +190,40 @@ async fn test_service_create_table_file_volume() { let metastore = SlateDBMetastore::new_in_memory().await; // Create a temporary directory for the file volume - let temp_dir = std::env::temp_dir().join("icebucket_test_file_volume"); + let temp_dir = std::env::temp_dir().join("test_file_volume"); let _ = std::fs::create_dir_all(&temp_dir); let temp_path = temp_dir.to_str().expect("Failed to convert path to string"); metastore .create_volume( &"test_volume".to_string(), - IceBucketVolume::new( + MetastoreVolume::new( "test_volume".to_string(), - icebucket_metastore::IceBucketVolumeType::File( - icebucket_metastore::IceBucketFileVolume { - path: temp_path.to_string(), - }, - ), + embucket_metastore::VolumeType::File(embucket_metastore::FileVolume { + path: temp_path.to_string(), + }), ), ) .await .expect("Failed to create volume"); metastore .create_database( - &"icebucket".to_string(), - IceBucketDatabase { - ident: "icebucket".to_string(), + &"embucket".to_string(), + MetastoreDatabase { + ident: "embucket".to_string(), properties: None, volume: "test_volume".to_string(), }, ) .await .expect("Failed to create database"); - let schema_ident = IceBucketSchemaIdent { - database: "icebucket".to_string(), + let schema_ident = MetastoreSchemaIdent { + database: "embucket".to_string(), schema: "public".to_string(), }; metastore .create_schema( &schema_ident.clone(), - IceBucketSchema { + MetastoreSchema { ident: schema_ident, properties: None, }, @@ -232,8 +231,8 @@ async fn test_service_create_table_file_volume() { .await .expect("Failed to create schema"); - let table_ident = IceBucketTableIdent { - database: "icebucket".to_string(), + let table_ident = MetastoreTableIdent { + database: "embucket".to_string(), schema: "public".to_string(), table: "target_table".to_string(), }; @@ -251,11 +250,7 @@ async fn test_service_create_table_file_volume() { let create_table_sql = format!("CREATE TABLE {table_ident} (id INT, name STRING, value FLOAT) as VALUES (1, 'test1', 100.0), (2, 'test2', 200.0), (3, 'test3', 300.0)"); let (res, _) = execution_svc - .query( - session_id, - &create_table_sql, - IceBucketQueryContext::default(), - ) + .query(session_id, &create_table_sql, QueryContext::default()) .await .expect("Failed to create table"); @@ -272,7 +267,7 @@ async fn test_service_create_table_file_volume() { let insert_sql = format!("INSERT INTO {table_ident} (id, name, value) VALUES (4, 'test4', 400.0), (5, 'test5', 500.0)"); let (res, _) = execution_svc - .query(session_id, &insert_sql, IceBucketQueryContext::default()) + .query(session_id, &insert_sql, QueryContext::default()) .await .expect("Failed to insert data"); diff --git 
a/crates/runtime/src/execution/tests/snapshots/icebucket_runtime__execution__tests__query__context_name_injection.snap b/crates/runtime/src/execution/tests/snapshots/embucket_runtime__execution__tests__query__context_name_injection.snap
similarity index 99%
rename from crates/runtime/src/execution/tests/snapshots/icebucket_runtime__execution__tests__query__context_name_injection.snap
rename to crates/runtime/src/execution/tests/snapshots/embucket_runtime__execution__tests__query__context_name_injection.snap
index c962b0f00..60a149c7c 100644
--- a/crates/runtime/src/execution/tests/snapshots/icebucket_runtime__execution__tests__query__context_name_injection.snap
+++ b/crates/runtime/src/execution/tests/snapshots/embucket_runtime__execution__tests__query__context_name_injection.snap
@@ -1,7 +1,6 @@
 ---
 source: crates/runtime/src/execution/tests/query.rs
 expression: "(from1, from2, from3, from4)"
-snapshot_kind: text
 ---
 (
   [
diff --git a/crates/runtime/src/execution/utils.rs b/crates/runtime/src/execution/utils.rs
index 1e0f10477..333003811 100644
--- a/crates/runtime/src/execution/utils.rs
+++ b/crates/runtime/src/execution/utils.rs
@@ -28,7 +28,8 @@ use chrono::DateTime;
 use datafusion::arrow::array::ArrayRef;
 use datafusion::arrow::datatypes::DataType;
 use datafusion::common::Result as DataFusionResult;
-use icebucket_metastore::{IceBucketSchemaIdent, IceBucketTableIdent};
+use embucket_metastore::SchemaIdent as MetastoreSchemaIdent;
+use embucket_metastore::TableIdent as MetastoreTableIdent;
 use sqlparser::ast::{Ident, ObjectName};
 use std::collections::HashMap;
 use std::sync::Arc;
@@ -381,7 +382,7 @@ impl From<&NormalizedIdent> for String {
     }
 }
 
-impl From<NormalizedIdent> for IceBucketTableIdent {
+impl From<NormalizedIdent> for MetastoreTableIdent {
     fn from(ident: NormalizedIdent) -> Self {
        let ident = ident.0;
        // TODO check len, return err.
This code is just tmp @@ -393,7 +394,7 @@ impl From for IceBucketTableIdent { } } -impl From for IceBucketSchemaIdent { +impl From for MetastoreSchemaIdent { fn from(ident: NormalizedIdent) -> Self { let ident = ident.0; Self { diff --git a/crates/runtime/src/http/catalog/handlers.rs b/crates/runtime/src/http/catalog/handlers.rs index 1462e7863..6557e9b59 100644 --- a/crates/runtime/src/http/catalog/handlers.rs +++ b/crates/runtime/src/http/catalog/handlers.rs @@ -23,15 +23,15 @@ use crate::http::metastore::error::{MetastoreAPIError, MetastoreAPIResult}; use crate::http::state::AppState; use axum::http::StatusCode; use axum::{extract::Path, extract::Query, extract::State, Json}; +use embucket_metastore::error::{self as metastore_error, MetastoreError}; +use embucket_metastore::{SchemaIdent as MetastoreSchemaIdent, TableIdent as MetastoreTableIdent}; +use embucket_utils::list_config::ListConfig; use iceberg_rest_catalog::models::{ CatalogConfig, CommitTableResponse, CreateNamespaceRequest, CreateNamespaceResponse, CreateTableRequest, GetNamespaceResponse, ListNamespacesResponse, ListTablesResponse, LoadTableResult, RegisterTableRequest, }; use iceberg_rust_spec::table_metadata::TableMetadata; -use icebucket_metastore::error::{self as metastore_error, MetastoreError}; -use icebucket_metastore::{IceBucketSchemaIdent, IceBucketTableIdent}; -use icebucket_utils::list_config::ListConfig; use object_store::ObjectStore; use serde_json::{from_slice, Value}; use snafu::ResultExt; @@ -57,7 +57,7 @@ pub async fn get_namespace( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, ) -> MetastoreAPIResult> { - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: database_name.clone(), schema: schema_name.clone(), }; @@ -80,7 +80,7 @@ pub async fn delete_namespace( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, ) -> MetastoreAPIResult { - let schema_ident = IceBucketSchemaIdent::new(database_name, schema_name); + let schema_ident = MetastoreSchemaIdent::new(database_name, schema_name); state .metastore .delete_schema(&schema_ident, true) @@ -108,7 +108,7 @@ pub async fn create_table( Path((database_name, schema_name)): Path<(String, String)>, Json(table): Json, ) -> MetastoreAPIResult> { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, &table.name); + let table_ident = MetastoreTableIdent::new(&database_name, &schema_name, &table.name); let volume_ident = state .metastore .volume_for_table(&table_ident.clone()) @@ -133,7 +133,7 @@ pub async fn register_table( Path((database_name, schema_name)): Path<(String, String)>, Json(register): Json, ) -> MetastoreAPIResult> { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, ®ister.name); + let table_ident = MetastoreTableIdent::new(&database_name, &schema_name, ®ister.name); let metadata_raw = state .metastore .volume_for_table(&table_ident) @@ -163,7 +163,7 @@ pub async fn commit_table( Path((database_name, schema_name, table_name)): Path<(String, String, String)>, Json(commit): Json, ) -> MetastoreAPIResult> { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); + let table_ident = MetastoreTableIdent::new(&database_name, &schema_name, &table_name); let table_updates = to_table_commit(commit); let ib_table = state .metastore @@ -181,7 +181,7 @@ pub async fn get_table( State(state): State, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, ) -> 
MetastoreAPIResult> { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); + let table_ident = MetastoreTableIdent::new(&database_name, &schema_name, &table_name); let table = state .metastore .get_table(&table_ident) @@ -202,7 +202,7 @@ pub async fn delete_table( State(state): State, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, ) -> MetastoreAPIResult { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); + let table_ident = MetastoreTableIdent::new(&database_name, &schema_name, &table_name); state .metastore .delete_table(&table_ident, true) @@ -216,7 +216,7 @@ pub async fn list_tables( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, ) -> MetastoreAPIResult> { - let schema_ident = IceBucketSchemaIdent::new(database_name, schema_name); + let schema_ident = MetastoreSchemaIdent::new(database_name, schema_name); let tables = state .metastore .list_tables(&schema_ident, ListConfig::default()) diff --git a/crates/runtime/src/http/catalog/schemas.rs b/crates/runtime/src/http/catalog/schemas.rs index e4ce0201d..87f4e04a3 100644 --- a/crates/runtime/src/http/catalog/schemas.rs +++ b/crates/runtime/src/http/catalog/schemas.rs @@ -15,23 +15,24 @@ // specific language governing permissions and limitations // under the License. +use embucket_metastore::{ + RwObject, Schema as MetastoreSchema, SchemaIdent as MetastoreSchemaIdent, + Table as MetastoreTable, TableCreateRequest as MetastoreTableCreateRequest, + TableFormat as MetastoreTableFormat, TableIdent as MetastoreTableIdent, + TableUpdate as MetastoreTableUpdate, VolumeIdent as MetastoreVolumeIdent, +}; use iceberg_rest_catalog::models::{ CreateNamespaceRequest, CreateNamespaceResponse, CreateTableRequest, GetNamespaceResponse, ListNamespacesResponse, ListTablesResponse, }; -use iceberg_rust::catalog::commit::{TableRequirement, TableUpdate}; +use iceberg_rust::catalog::commit::{TableRequirement, TableUpdate as IcebergTableUpdate}; use iceberg_rust_spec::identifier::Identifier; -use icebucket_metastore::{ - IceBucketSchema, IceBucketSchemaIdent, IceBucketTable, IceBucketTableCreateRequest, - IceBucketTableFormat, IceBucketTableIdent, IceBucketTableUpdate, IceBucketVolumeIdent, - RwObject, -}; use serde::{Deserialize, Serialize}; #[must_use] -pub fn to_schema(request: CreateNamespaceRequest, db: String) -> IceBucketSchema { - IceBucketSchema { - ident: IceBucketSchemaIdent { +pub fn to_schema(request: CreateNamespaceRequest, db: String) -> MetastoreSchema { + MetastoreSchema { + ident: MetastoreSchemaIdent { schema: request .namespace .first() @@ -44,7 +45,7 @@ pub fn to_schema(request: CreateNamespaceRequest, db: String) -> IceBucketSchema } #[must_use] -pub fn from_schema(schema: IceBucketSchema) -> CreateNamespaceResponse { +pub fn from_schema(schema: MetastoreSchema) -> CreateNamespaceResponse { CreateNamespaceResponse { namespace: vec![schema.ident.database], properties: schema.properties, @@ -52,7 +53,7 @@ pub fn from_schema(schema: IceBucketSchema) -> CreateNamespaceResponse { } #[must_use] -pub fn from_get_schema(schema: IceBucketSchema) -> GetNamespaceResponse { +pub fn from_get_schema(schema: MetastoreSchema) -> GetNamespaceResponse { GetNamespaceResponse { namespace: vec![schema.ident.database], properties: schema.properties, @@ -62,13 +63,13 @@ pub fn from_get_schema(schema: IceBucketSchema) -> GetNamespaceResponse { #[must_use] pub fn to_create_table( table: CreateTableRequest, - table_ident: 
IceBucketTableIdent, - volume_ident: Option, -) -> IceBucketTableCreateRequest { - IceBucketTableCreateRequest { + table_ident: MetastoreTableIdent, + volume_ident: Option, +) -> MetastoreTableCreateRequest { + MetastoreTableCreateRequest { ident: table_ident, properties: table.properties, - format: Some(IceBucketTableFormat::Iceberg), + format: Some(MetastoreTableFormat::Iceberg), location: table.location, schema: *table.schema, partition_spec: table.partition_spec.map(|spec| *spec), @@ -80,7 +81,7 @@ pub fn to_create_table( } #[must_use] -pub fn from_schemas_list(schemas: Vec>) -> ListNamespacesResponse { +pub fn from_schemas_list(schemas: Vec>) -> ListNamespacesResponse { let namespaces = schemas .into_iter() .map(|schema| vec![schema.data.ident.schema]) @@ -92,15 +93,15 @@ pub fn from_schemas_list(schemas: Vec>) -> ListNamespa } #[must_use] -pub fn to_table_commit(commit: CommitTable) -> IceBucketTableUpdate { - IceBucketTableUpdate { +pub fn to_table_commit(commit: CommitTable) -> MetastoreTableUpdate { + MetastoreTableUpdate { requirements: commit.requirements, updates: commit.updates, } } #[must_use] -pub fn from_tables_list(tables: Vec>) -> ListTablesResponse { +pub fn from_tables_list(tables: Vec>) -> ListTablesResponse { let identifiers = tables .into_iter() .map(|table| Identifier::new(&[table.data.ident.schema], &table.data.ident.table)) @@ -121,5 +122,5 @@ pub struct CommitTable { /// Assertions about the metadata that must be true to update the metadata pub requirements: Vec, /// Changes to the table metadata - pub updates: Vec, + pub updates: Vec, } diff --git a/crates/runtime/src/http/config.rs b/crates/runtime/src/http/config.rs index 18bb3abf3..a6c7c67da 100644 --- a/crates/runtime/src/http/config.rs +++ b/crates/runtime/src/http/config.rs @@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct IceBucketWebConfig { +pub struct WebConfig { pub host: String, pub port: u16, pub allow_origin: Option, diff --git a/crates/runtime/src/http/control/handlers/mod.rs b/crates/runtime/src/http/control/handlers/mod.rs deleted file mode 100644 index 015239e59..000000000 --- a/crates/runtime/src/http/control/handlers/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -pub mod storage_profiles; -pub mod warehouses; diff --git a/crates/runtime/src/http/control/handlers/storage_profiles.rs b/crates/runtime/src/http/control/handlers/storage_profiles.rs deleted file mode 100644 index 5798fbe9b..000000000 --- a/crates/runtime/src/http/control/handlers/storage_profiles.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -use crate::http::control::schemas::storage_profiles::{ - AwsAccessKeyCredential, AwsRoleCredential, CloudProvider, CreateStorageProfilePayload, - Credentials, StorageProfile, -}; -use axum::{extract::Path, extract::State, Json}; -use control_plane::models::{StorageProfile as StorageProfileModel, StorageProfileCreateRequest}; -use std::result::Result; -use utoipa::OpenApi; -use uuid::Uuid; - -use crate::error::AppError; -use crate::state::AppState; - -#[derive(OpenApi)] -#[openapi( - paths( - create_storage_profile, - get_storage_profile, - delete_storage_profile, - list_storage_profiles, - ), - components(schemas( - CreateStorageProfilePayload, - StorageProfile, - Credentials, - AwsAccessKeyCredential, - AwsRoleCredential, - CloudProvider - ),) -)] -pub struct StorageProfileApi; - -#[utoipa::path( - post, - operation_id = "createStorageProfile", - path = "", - request_body = CreateStorageProfilePayload, - responses((status = 200, body = StorageProfile)) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn create_storage_profile( - State(state): State, - Json(payload): Json, -) -> Result, AppError> { - let request: StorageProfileCreateRequest = payload.into(); - let profile: StorageProfileModel = state.control_svc.create_profile(&request).await?; - - Ok(Json(profile.into())) -} - -#[utoipa::path( - get, - operation_id = "getStorageProfile", - path = "/{storageProfileId}", - params(("storageProfileId" = Uuid, description = "Storage profile ID")), - responses((status = 200, body = StorageProfile)), -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn get_storage_profile( - State(state): State, - Path(id): Path, -) -> Result, AppError> { - let profile = state.control_svc.get_profile(id).await?; - - Ok(Json(profile.into())) -} - -#[utoipa::path( - delete, - operation_id = "deleteStorageProfile", - path = "/{storageProfileId}", - params(("storageProfileId" = Uuid, description = "Storage profile ID")), - responses((status = 200)) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn delete_storage_profile( - State(state): State, - Path(id): Path, -) -> Result, AppError> { - state.control_svc.delete_profile(id).await?; - - Ok(Json(())) -} - -#[utoipa::path( - get, - operation_id = "listStorageProfiles", - path = "", - responses((status = 200, body = Vec)) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn list_storage_profiles( - State(state): State, -) -> Result>, AppError> { - let profiles = state.control_svc.list_profiles().await?; - - Ok(Json(profiles.into_iter().map(Into::into).collect())) -} diff --git a/crates/runtime/src/http/control/handlers/warehouses.rs 
b/crates/runtime/src/http/control/handlers/warehouses.rs deleted file mode 100644 index 305d8e276..000000000 --- a/crates/runtime/src/http/control/handlers/warehouses.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -use crate::http::control::schemas::warehouses::{CreateWarehouseRequest, Warehouse}; -use axum::{extract::Path, extract::State, Json}; -use control_plane::models::{Warehouse as WarehouseModel, WarehouseCreateRequest}; -use std::result::Result; -use utoipa::OpenApi; -use uuid::Uuid; - -use crate::error::AppError; -use crate::state::AppState; - -// #[derive(OpenApi)] -// #[openapi( -// paths(create_storage_profile, get_storage_profile, delete_storage_profile, list_storage_profiles,), -// components(schemas(CreateStorageProfilePayload, StorageProfileSchema, Credentials, AwsAccessKeyCredential, AwsRoleCredential, CloudProvider),) -// )] -// pub struct StorageProfileApi; - -#[derive(OpenApi)] -#[openapi( - paths(create_warehouse, get_warehouse, delete_warehouse, list_warehouses,), - components(schemas(CreateWarehouseRequest, Warehouse,),) -)] -pub struct WarehouseApi; - -#[utoipa::path( - post, - operation_id = "createWarehouse", - path = "", - request_body = CreateWarehouseRequest, - responses((status = 200, body = Warehouse)) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn create_warehouse( - State(state): State, - Json(payload): Json, -) -> Result, AppError> { - let request: WarehouseCreateRequest = payload.into(); - let profile: WarehouseModel = state.control_svc.create_warehouse(&request).await?; - - Ok(Json(profile.into())) -} - -#[utoipa::path( - get, - operation_id = "getWarehouse", - path = "/{warehouseId}", - params(("warehouseId" = Uuid, description = "Warehouse ID")), - responses((status = 200, body = Warehouse)) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn get_warehouse( - State(state): State, - Path(id): Path, -) -> Result, AppError> { - let profile = state.control_svc.get_warehouse(id).await?; - - Ok(Json(profile.into())) -} - -#[utoipa::path( - delete, - operation_id = "deleteWarehouse", - path = "/{warehouseId}", - params(("warehouseId" = Uuid, description = "Warehouse ID")), - responses((status = 200)) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn delete_warehouse( - State(state): State, - Path(id): Path, -) -> Result, AppError> { - state.control_svc.delete_warehouse(id).await?; - - Ok(Json(())) -} - -#[utoipa::path( - get, - operation_id = "listWarehouses", - path = "", - responses((status = 200, body = Vec)) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = 
tracing::Level::TRACE))] -pub async fn list_warehouses( - State(state): State, -) -> Result>, AppError> { - let profiles = state.control_svc.list_warehouses().await?; - - Ok(Json(profiles.into_iter().map(Into::into).collect())) -} diff --git a/crates/runtime/src/http/control/mod.rs b/crates/runtime/src/http/control/mod.rs deleted file mode 100644 index 96e985d39..000000000 --- a/crates/runtime/src/http/control/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -pub mod handlers; -pub mod router; -pub mod schemas; diff --git a/crates/runtime/src/http/control/router.rs b/crates/runtime/src/http/control/router.rs deleted file mode 100644 index e6f5eb5dd..000000000 --- a/crates/runtime/src/http/control/router.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
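The storage-profile and warehouse handlers removed above all shared one conversion pattern: the HTTP payload type is turned into a control-plane request via `From`, and the model coming back from `control_svc` is converted back into an HTTP response type the same way. A small std-only sketch of that round trip follows; the type and field names are simplified stand-ins, not the actual `control_plane` models.

// Hypothetical HTTP payload, standing in for CreateWarehouseRequest.
struct CreateWarehousePayload {
    prefix: String,
    name: String,
}

// Hypothetical control-plane request, standing in for models::WarehouseCreateRequest.
struct WarehouseCreateRequest {
    prefix: String,
    name: String,
}

impl From<CreateWarehousePayload> for WarehouseCreateRequest {
    fn from(p: CreateWarehousePayload) -> Self {
        Self { prefix: p.prefix, name: p.name }
    }
}

// Hypothetical service model and its HTTP-facing counterpart.
struct WarehouseModel {
    id: u64,
    name: String,
}

struct WarehouseResponse {
    id: u64,
    name: String,
}

impl From<WarehouseModel> for WarehouseResponse {
    fn from(m: WarehouseModel) -> Self {
        Self { id: m.id, name: m.name }
    }
}

fn main() {
    let payload = CreateWarehousePayload { prefix: "wh".into(), name: "demo".into() };
    // Same shape as the deleted handler bodies: payload.into(), call the service, model.into().
    let request: WarehouseCreateRequest = payload.into();
    let created = WarehouseModel { id: 1, name: request.name }; // stands in for control_svc.create_warehouse(&request)
    let response: WarehouseResponse = created.into();
    println!("created warehouse {} ({})", response.id, response.name);
}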
- -use crate::state::AppState; -use axum::routing::{delete, get, post}; -use axum::Router; - -use crate::http::control::handlers::storage_profiles::{ - create_storage_profile, delete_storage_profile, get_storage_profile, list_storage_profiles, -}; -use crate::http::control::handlers::warehouses::{ - create_warehouse, delete_warehouse, get_warehouse, list_warehouses, -}; - -pub fn create_router() -> Router { - let sp_router = Router::new() - .route("/", post(create_storage_profile)) - .route("/{id}", get(get_storage_profile)) - .route("/{id}", delete(delete_storage_profile)) - .route("/", get(list_storage_profiles)); - - let wh_router = Router::new() - .route("/", post(create_warehouse)) - .route("/{id}", get(get_warehouse)) - .route("/{id}", delete(delete_warehouse)) - .route("/", get(list_warehouses)); - - Router::new() - .nest("/v1/storage-profile", sp_router) - .nest("/v1/warehouse", wh_router) -} diff --git a/crates/runtime/src/http/control/schemas/mod.rs b/crates/runtime/src/http/control/schemas/mod.rs deleted file mode 100644 index 015239e59..000000000 --- a/crates/runtime/src/http/control/schemas/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -pub mod storage_profiles; -pub mod warehouses; diff --git a/crates/runtime/src/http/control/schemas/storage_profiles.rs b/crates/runtime/src/http/control/schemas/storage_profiles.rs deleted file mode 100644 index 4a16394ab..000000000 --- a/crates/runtime/src/http/control/schemas/storage_profiles.rs +++ /dev/null @@ -1,243 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -use chrono::NaiveDateTime; -use control_plane::models; -use serde::{Deserialize, Serialize}; -use std::option::Option; -use utoipa::ToSchema; -use uuid::Uuid; - -// Define the cloud provider enum -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, ToSchema)] -#[serde(rename_all = "lowercase")] -pub enum CloudProvider { - Aws, - Azure, - Gcp, - Fs, -} - -impl From for models::CloudProvider { - fn from(provider: CloudProvider) -> Self { - match provider { - CloudProvider::Aws => Self::AWS, - CloudProvider::Azure => Self::AZURE, - CloudProvider::Gcp => Self::GCS, - CloudProvider::Fs => Self::FS, - } - } -} -impl From for CloudProvider { - fn from(provider: models::CloudProvider) -> Self { - match provider { - models::CloudProvider::AWS => Self::Aws, - models::CloudProvider::AZURE => Self::Azure, - models::CloudProvider::GCS => Self::Gcp, - models::CloudProvider::FS => Self::Fs, - } - } -} - -// AWS Access Key Credentials -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, ToSchema)] -pub struct AwsAccessKeyCredential { - pub aws_access_key_id: String, - pub aws_secret_access_key: String, -} - -impl From for models::AwsAccessKeyCredential { - fn from(credential: AwsAccessKeyCredential) -> Self { - Self { - aws_access_key_id: credential.aws_access_key_id, - aws_secret_access_key: credential.aws_secret_access_key, - } - } -} -impl From for AwsAccessKeyCredential { - fn from(credential: models::AwsAccessKeyCredential) -> Self { - Self { - aws_access_key_id: credential.aws_access_key_id, - aws_secret_access_key: credential.aws_secret_access_key, - } - } -} - -// AWS Role Credentials -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, ToSchema)] -pub struct AwsRoleCredential { - pub role_arn: String, - pub external_id: String, -} - -impl From for models::AwsRoleCredential { - fn from(credential: AwsRoleCredential) -> Self { - Self { - role_arn: credential.role_arn, - external_id: credential.external_id, - } - } -} -impl From for AwsRoleCredential { - fn from(credential: models::AwsRoleCredential) -> Self { - Self { - role_arn: credential.role_arn, - external_id: credential.external_id, - } - } -} - -// Enum to represent either Access Key or Role Credentials -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, ToSchema)] -#[serde(tag = "credential_type")] // Enables tagged union based on credential type -pub enum Credentials { - #[serde(rename = "access_key")] - AccessKey(AwsAccessKeyCredential), - #[serde(rename = "role")] - Role(AwsRoleCredential), -} - -impl From for models::Credentials { - fn from(credential: Credentials) -> Self { - match credential { - Credentials::AccessKey(aws_credential) => Self::AccessKey(aws_credential.into()), - Credentials::Role(role_credential) => Self::Role(role_credential.into()), - } - } -} -impl From for Credentials { - fn from(credential: models::Credentials) -> Self { - match credential { - models::Credentials::AccessKey(aws_credential) => { - Self::AccessKey(aws_credential.into()) - } - models::Credentials::Role(role_credential) => Self::Role(role_credential.into()), - } - } -} - -// Request struct for creating a storage profile -#[derive(Serialize, Deserialize, Debug, ToSchema)] -pub struct CreateStorageProfilePayload { - #[serde(rename = "type")] - pub provider_type: CloudProvider, - pub region: Option, - pub bucket: Option, - pub credentials: Option, - pub sts_role_arn: Option, - pub endpoint: Option, -} - -impl From for models::StorageProfileCreateRequest { - fn from(payload: CreateStorageProfilePayload) -> Self { - Self { - r#type: 
payload.provider_type.into(), - region: payload.region, - bucket: payload.bucket, - credentials: payload.credentials.map(std::convert::Into::into), - sts_role_arn: payload.sts_role_arn, - endpoint: payload.endpoint, - validate_credentials: Option::from(false), - } - } -} - -// Response struct for returning a storage profile -#[derive(Serialize, Deserialize, Debug, ToSchema)] -pub struct StorageProfile { - pub id: Uuid, - #[serde(rename = "type")] - pub r#type: CloudProvider, - pub region: Option, - pub bucket: Option, - pub credentials: Option, - pub sts_role_arn: Option, - pub endpoint: Option, - - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, -} - -impl From for StorageProfile { - fn from(profile: models::StorageProfile) -> Self { - Self { - id: profile.id, - r#type: profile.r#type.into(), - region: profile.region, - bucket: profile.bucket, - credentials: profile.credentials.map(std::convert::Into::into), - sts_role_arn: profile.sts_role_arn, - endpoint: profile.endpoint, - created_at: profile.created_at, - updated_at: profile.updated_at, - } - } -} - -#[cfg(test)] -#[allow(clippy::unwrap_used)] -mod tests { - use super::*; - - #[test] - fn test_deserialize_create_storage_profile_payload() { - let payload = r#" - { - "type": "aws", - "region": "us-west-2", - "bucket": "my-bucket", - "credentials": { - "credential_type": "access_key", - "aws_access_key_id": "my-access-key", - "aws_secret_access_key": "my-secret-access-key" - } - } - "#; - - let result: CreateStorageProfilePayload = serde_json::from_str(payload).unwrap(); - assert_eq!(result.region.unwrap_or_default(), "us-west-2"); - assert_eq!(result.bucket.unwrap_or_default(), "my-bucket"); - assert_eq!(result.provider_type, CloudProvider::Aws); - assert_eq!( - result.credentials.unwrap(), - Credentials::AccessKey(AwsAccessKeyCredential { - aws_access_key_id: "my-access-key".to_string(), - aws_secret_access_key: "my-secret-access-key".to_string(), - }) - ); - } - - #[test] - fn test_serialize_create_storage_profile_payload() { - let payload = CreateStorageProfilePayload { - provider_type: CloudProvider::Aws, - region: Some("us-west-2".to_string()), - bucket: Some("my-bucket".to_string()), - credentials: Some(Credentials::AccessKey(AwsAccessKeyCredential { - aws_access_key_id: "my-access-key".to_string(), - aws_secret_access_key: "my-secret-access-key".to_string(), - })), - sts_role_arn: None, - endpoint: None, - }; - - let result = serde_json::to_string(&payload).unwrap(); - - let expected = r#"{"type":"aws","region":"us-west-2","bucket":"my-bucket","credentials":{"credential_type":"access_key","aws_access_key_id":"my-access-key","aws_secret_access_key":"my-secret-access-key"},"sts_role_arn":null,"endpoint":null}"#; - assert_eq!(result, expected); - } -} diff --git a/crates/runtime/src/http/control/schemas/warehouses.rs b/crates/runtime/src/http/control/schemas/warehouses.rs deleted file mode 100644 index 60b1c5da7..000000000 --- a/crates/runtime/src/http/control/schemas/warehouses.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -use chrono::NaiveDateTime; -use control_plane::models; -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; -use uuid::Uuid; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct CreateWarehouseRequest { - pub prefix: String, - pub name: String, - pub storage_profile_id: Uuid, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct Warehouse { - pub id: Uuid, - pub prefix: String, - pub name: String, - pub location: String, - pub storage_profile_id: Uuid, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, -} - -impl From for models::WarehouseCreateRequest { - fn from(request: CreateWarehouseRequest) -> Self { - Self { - prefix: request.prefix, - name: request.name, - storage_profile_id: request.storage_profile_id, - } - } -} - -impl From for Warehouse { - fn from(warehouse: models::Warehouse) -> Self { - Self { - id: warehouse.id, - prefix: warehouse.prefix, - name: warehouse.name, - location: warehouse.location, - storage_profile_id: warehouse.storage_profile_id, - created_at: warehouse.created_at, - updated_at: warehouse.updated_at, - } - } -} diff --git a/crates/runtime/src/http/dbt/error.rs b/crates/runtime/src/http/dbt/error.rs index 8d6a95fb5..1ff3f2672 100644 --- a/crates/runtime/src/http/dbt/error.rs +++ b/crates/runtime/src/http/dbt/error.rs @@ -213,93 +213,3 @@ impl IntoResponse for ExecutionError { (status_code, body).into_response() } } - -#[cfg(test)] -#[allow(clippy::unwrap_in_result)] -mod tests { - - // TODO: Replace these with snapshot tests - /*#[test] - fn test_http_server_response() { - assert_ne!( - http::StatusCode::INTERNAL_SERVER_ERROR, - DbtError::ControlService { - source: ControlPlaneError::Execution { - source: IceBucketSQLError::Arrow { - source: ArrowError::ComputeError(String::new()) - } - }, - } - .into_response() - .status(), - ); - assert_eq!( - http::StatusCode::UNSUPPORTED_MEDIA_TYPE, - DbtError::ControlService { - source: ControlPlaneError::Execution { - source: IceBucketSQLError::Arrow { - source: ArrowError::ComputeError(String::new()) - } - }, - } - .into_response() - .status(), - ); - assert_eq!( - http::StatusCode::UNPROCESSABLE_ENTITY, - DbtError::ControlService { - source: ControlPlaneError::Execution { - source: IceBucketSQLError::DataFusion { - source: DataFusionError::ArrowError( - ArrowError::InvalidArgumentError(String::new()), - Some(String::new()), - ) - }, - }, - } - .into_response() - .status(), - ); - assert_eq!( - http::StatusCode::NOT_FOUND, - DbtError::ControlService { - source: ControlPlaneError::WarehouseNameNotFound { - name: String::new() - }, - } - .into_response() - .status(), - ); - assert_eq!( - http::StatusCode::NOT_FOUND, - DbtError::ControlService { - source: ControlPlaneError::WarehouseNotFound { id: Uuid::new_v4() }, - } - .into_response() - .status(), - ); - assert_eq!( - http::StatusCode::NOT_FOUND, - DbtError::ControlService { - source: ControlPlaneError::WarehouseNotFound { id: Uuid::new_v4() }, - } - .into_response() - .status(), - ); - assert_eq!( - http::StatusCode::UNPROCESSABLE_ENTITY, - DbtError::ControlService { - source: 
ControlPlaneError::DataFusion { - // here just any error for test, since we are handling any DataFusion err - source: DataFusionError::ArrowError( - ArrowError::InvalidArgumentError(String::new()), - Some(String::new()), - ) - } - } - .into_response() - .status(), - ); - } - */ -} diff --git a/crates/runtime/src/http/dbt/handlers.rs b/crates/runtime/src/http/dbt/handlers.rs index 7fe048697..6a8d52639 100644 --- a/crates/runtime/src/http/dbt/handlers.rs +++ b/crates/runtime/src/http/dbt/handlers.rs @@ -16,7 +16,7 @@ // under the License. use super::error::{self as dbt_error, DbtError, DbtResult}; -use crate::execution::query::IceBucketQueryContext; +use crate::execution::query::QueryContext; use crate::execution::utils::DataSerializationFormat; use crate::http::dbt::schemas::{ JsonResponse, LoginData, LoginRequestBody, LoginRequestQuery, LoginResponse, QueryRequest, @@ -132,11 +132,7 @@ pub async fn query( let (records, columns) = state .execution_svc - .query( - &session_id, - &body_json.sql_text, - IceBucketQueryContext::default(), - ) + .query(&session_id, &body_json.sql_text, QueryContext::default()) .await .map_err(|e| DbtError::Execution { source: e })?; diff --git a/crates/runtime/src/http/metastore/error.rs b/crates/runtime/src/http/metastore/error.rs index 37c677ef0..30cf4f276 100644 --- a/crates/runtime/src/http/metastore/error.rs +++ b/crates/runtime/src/http/metastore/error.rs @@ -17,7 +17,7 @@ use crate::http::error::ErrorResponse; use axum::{response::IntoResponse, Json}; -use icebucket_metastore::error::MetastoreError; +use embucket_metastore::error::MetastoreError; use snafu::prelude::*; #[derive(Snafu, Debug)] diff --git a/crates/runtime/src/http/metastore/handlers.rs b/crates/runtime/src/http/metastore/handlers.rs index bae07b73f..06e72b91f 100644 --- a/crates/runtime/src/http/metastore/handlers.rs +++ b/crates/runtime/src/http/metastore/handlers.rs @@ -23,29 +23,15 @@ use axum::{ use snafu::ResultExt; #[allow(clippy::wildcard_imports)] -use icebucket_metastore::{ +use embucket_metastore::{ error::{self as metastore_error, MetastoreError}, *, }; use crate::http::state::AppState; -use icebucket_utils::list_config::ListConfig; +use embucket_utils::list_config::ListConfig; use validator::Validate; -/*#[derive(OpenApi)] -#[openapi( - paths( - list_volumes, - get_volume - ), - components( - schemas( - HTTPIceBucketVolume, - ), - ) -)] -pub struct MetastoreApi;*/ - pub type RwObjectVec = Vec>; #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] @@ -54,19 +40,10 @@ pub struct QueryParameters { pub cascade: Option, } -/*#[utoipa::path( - get, - operation_id = "listVolumes", - path="/volumes", - responses( - (status = StatusCode::OK, body = RwObjectVec), - (status = "5XX", description = "server error"), - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn list_volumes( State(state): State, -) -> MetastoreAPIResult>> { +) -> MetastoreAPIResult>> { let volumes = state .metastore .list_volumes(ListConfig::default()) @@ -78,24 +55,11 @@ pub async fn list_volumes( Ok(Json(volumes)) } -/*#[utoipa::path( - get, - operation_id = "getVolume", - path="/volumes/{volumeName}", - params( - ("volumeName" = String, description = "Volume Name") - ), - responses( - (status = StatusCode::OK, body = RwObject), - (status = StatusCode::NOT_FOUND, description = "Volume not found", body = ErrorResponse), - (status = "5XX", description = "server error", body = ErrorResponse), - ) -)]*/ #[tracing::instrument(level = "debug", 
skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn get_volume( State(state): State, Path(volume_name): Path, -) -> MetastoreAPIResult>> { +) -> MetastoreAPIResult>> { match state.metastore.get_volume(&volume_name).await { Ok(Some(volume)) => Ok(Json(hide_sensitive(volume))), Ok(None) => Err(MetastoreError::VolumeNotFound { @@ -106,21 +70,11 @@ pub async fn get_volume( } } -/*#[utoipa::path( - post, - operation_id = "createVolume", - path="/volumes", - request_body = IceBucketVolume, - responses( - (status = 200, body = RwObject), - (status = "5XX", description = "server error", body = ErrorResponse), - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn create_volume( State(state): State, - Json(volume): Json, -) -> MetastoreAPIResult>> { + Json(volume): Json, +) -> MetastoreAPIResult>> { volume .validate() .context(metastore_error::ValidationSnafu)?; @@ -132,22 +86,12 @@ pub async fn create_volume( .map(|v| Json(hide_sensitive(v))) } -/*#[utoipa::path( - put, - operation_id = "updateVolume", - path="/volumes/{volumeName}", - params(("volumeName" = String, description = "Volume Name")), - request_body = IceBucketVolume, - responses((status = 200, body = IceBucketVolume), - (status = 404, description = "Volume not found") - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn update_volume( State(state): State, Path(volume_name): Path, - Json(volume): Json, -) -> MetastoreAPIResult>> { + Json(volume): Json, +) -> MetastoreAPIResult>> { volume .validate() .context(metastore_error::ValidationSnafu)?; @@ -159,13 +103,6 @@ pub async fn update_volume( .map(|v| Json(hide_sensitive(v))) } -/*#[utoipa::path( - delete, - operation_id = "deleteVolume", - path="/volumes/{volumeName}", - params(("volumeName" = String, description = "Volume Name")), - responses((status = 200), (status = 404, description = "Volume not found")) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn delete_volume( State(state): State, @@ -179,16 +116,10 @@ pub async fn delete_volume( .map_err(MetastoreAPIError) } -/*#[utoipa::path( - get, - operation_id = "listDatabases", - path="/databases", - responses((status = 200, body = RwObjectVec)) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn list_databases( State(state): State, -) -> MetastoreAPIResult>>> { +) -> MetastoreAPIResult>>> { state .metastore .list_databases(ListConfig::default()) @@ -197,20 +128,11 @@ pub async fn list_databases( .map(Json) } -/*#[utoipa::path( - get, - operation_id = "getDatabase", - path="/databases/{databaseName}", - params(("databaseName" = String, description = "Database Name")), - responses((status = 200, body = RwObject), - (status = 404, description = "Database not found") - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn get_database( State(state): State, Path(database_name): Path, -) -> MetastoreAPIResult>> { +) -> MetastoreAPIResult>> { match state.metastore.get_database(&database_name).await { Ok(Some(db)) => Ok(Json(db)), Ok(None) => Err(MetastoreError::DatabaseNotFound { @@ -221,18 +143,11 @@ pub async fn get_database( } } -/*#[utoipa::path( - post, - operation_id = "createDatabase", - path="/databases", - request_body = IceBucketDatabase, - responses((status = 200, body = RwObject)) -)]*/ 
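The metastore handlers kept in this file retain the same shape after the rename: an axum `State` extractor plus a `Json` body, a `validator::Validate` check, then a call into the metastore with the result wrapped back in `Json`. A compilable sketch of that pattern with simplified placeholder types follows; the in-memory `AppState`, the error mapping, and the stored `Database` struct below are assumptions standing in for the real `embucket_metastore` API, and the snippet assumes the axum, serde, and validator crates.

use std::sync::{Arc, Mutex};

use axum::{extract::State, http::StatusCode, routing::post, Json, Router};
use serde::{Deserialize, Serialize};
use validator::Validate;

// Hypothetical application state: an in-memory list standing in for the metastore.
#[derive(Clone, Default)]
struct AppState {
    databases: Arc<Mutex<Vec<Database>>>,
}

// Incoming payload; the validate() call below mirrors the checks the real handlers run.
#[derive(Deserialize, Validate)]
struct DatabasePayload {
    #[validate(length(min = 1))]
    name: String,
    #[validate(length(min = 1))]
    volume: String,
}

// Outgoing representation returned as JSON.
#[derive(Clone, Serialize)]
struct Database {
    name: String,
    volume: String,
}

async fn create_database(
    State(state): State<AppState>,
    Json(payload): Json<DatabasePayload>,
) -> Result<Json<Database>, StatusCode> {
    // A validation failure maps to 422 here, analogous to the real handlers'
    // MetastoreError::Validation wrapping.
    payload
        .validate()
        .map_err(|_| StatusCode::UNPROCESSABLE_ENTITY)?;

    let db = Database {
        name: payload.name,
        volume: payload.volume,
    };
    state.databases.lock().unwrap().push(db.clone());
    Ok(Json(db))
}

fn main() {
    // Building the router is enough to type-check the handler wiring;
    // actually serving it would additionally require a tokio runtime.
    let _app: Router = Router::new()
        .route("/databases", post(create_database))
        .with_state(AppState::default());
}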
#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn create_database( State(state): State, - Json(database): Json, -) -> MetastoreAPIResult>> { + Json(database): Json, +) -> MetastoreAPIResult>> { database .validate() .context(metastore_error::ValidationSnafu)?; @@ -244,22 +159,12 @@ pub async fn create_database( .map(Json) } -/*#[utoipa::path( - put, - operation_id = "updateDatabase", - path="/databases/{databaseName}", - params(("databaseName" = String, description = "Database Name")), - request_body = IceBucketDatabase, - responses((status = 200, body = RwObject), - (status = 404, description = "Database not found") - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn update_database( State(state): State, Path(database_name): Path, - Json(database): Json, -) -> MetastoreAPIResult>> { + Json(database): Json, +) -> MetastoreAPIResult>> { database .validate() .context(metastore_error::ValidationSnafu)?; @@ -272,13 +177,6 @@ pub async fn update_database( .map(Json) } -/*#[utoipa::path( - delete, - operation_id = "deleteDatabase", - path="/databases/{databaseName}", - params(("databaseName" = String, description = "Database Name")), - responses((status = 200)) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn delete_database( State(state): State, @@ -292,20 +190,11 @@ pub async fn delete_database( .map_err(MetastoreAPIError) } -/*#[utoipa::path( - get, - operation_id = "listSchemas", - path="/databases/{databaseName}/schemas", - params(("databaseName" = String, description = "Database Name")), - responses((status = 200, body = RwObjectVec), - (status = 404, description = "Database not found") - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn list_schemas( State(state): State, Path(database_name): Path, -) -> MetastoreAPIResult>>> { +) -> MetastoreAPIResult>>> { state .metastore .list_schemas(&database_name, ListConfig::default()) @@ -314,23 +203,12 @@ pub async fn list_schemas( .map(Json) } -/*#[utoipa::path( - get, - operation_id = "getSchema", - path="/databases/{databaseName}/schemas/{schemaName}", - params(("databaseName" = String, description = "Database Name"), - ("schemaName" = String, description = "Schema Name") - ), - responses((status = 200, body = RwObject), - (status = 404, description = "Schema not found") - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn get_schema( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, -) -> MetastoreAPIResult>> { - let schema_ident = IceBucketSchemaIdent { +) -> MetastoreAPIResult>> { + let schema_ident = SchemaIdent { database: database_name.clone(), schema: schema_name.clone(), }; @@ -345,20 +223,12 @@ pub async fn get_schema( } } -/*#[utoipa::path( - post, - operation_id = "createSchema", - path="/databases/{databaseName}/schemas", - params(("databaseName" = String, description = "Database Name")), - request_body = IceBucketSchema, - responses((status = 200, body = RwObject)) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn create_schema( State(state): State, Path(database_name): Path, - Json(schema): Json, -) -> MetastoreAPIResult>> { + Json(schema): Json, +) -> MetastoreAPIResult>> { state .metastore .create_schema(&schema.ident.clone(), 
schema) @@ -367,25 +237,13 @@ pub async fn create_schema( .map(Json) } -/*#[utoipa::path( - put, - operation_id = "updateSchema", - path="/databases/{databaseName}/schemas/{schemaName}", - params(("databaseName" = String, description = "Database Name"), - ("schemaName" = String, description = "Schema Name") - ), - request_body = IceBucketSchema, - responses((status = 200, body = RwObject), - (status = 404, description = "Schema not found") - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn update_schema( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, - Json(schema): Json, -) -> MetastoreAPIResult>> { - let schema_ident = IceBucketSchemaIdent::new(database_name, schema_name); + Json(schema): Json, +) -> MetastoreAPIResult>> { + let schema_ident = SchemaIdent::new(database_name, schema_name); // TODO: Implement schema renames state .metastore @@ -395,22 +253,13 @@ pub async fn update_schema( .map(Json) } -/*#[utoipa::path( - delete, - operation_id = "deleteSchema", - path="/databases/{databaseName}/schemas/{schemaName}", - params(("databaseName" = String, description = "Database Name"), - ("schemaName" = String, description = "Schema Name") - ), - responses((status = 200)) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn delete_schema( State(state): State, Query(query): Query, Path((database_name, schema_name)): Path<(String, String)>, ) -> MetastoreAPIResult<()> { - let schema_ident = IceBucketSchemaIdent::new(database_name, schema_name); + let schema_ident = SchemaIdent::new(database_name, schema_name); state .metastore .delete_schema(&schema_ident, query.cascade.unwrap_or_default()) @@ -418,23 +267,12 @@ pub async fn delete_schema( .map_err(MetastoreAPIError) } -/*#[utoipa::path( - get, - operation_id = "listTables", - path="/databases/{databaseName}/schemas/{schemaName}/tables", - params(("databaseName" = String, description = "Database Name"), - ("schemaName" = String, description = "Schema Name") - ), - responses((status = 200, body = RwObjectVec), - (status = 404, description = "Schema not found") - ) -)]*/ #[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] pub async fn list_tables( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, -) -> MetastoreAPIResult>>> { - let schema_ident = IceBucketSchemaIdent::new(database_name, schema_name); +) -> MetastoreAPIResult>>> { + let schema_ident = SchemaIdent::new(database_name, schema_name); state .metastore .list_tables(&schema_ident, ListConfig::default()) @@ -447,8 +285,8 @@ pub async fn list_tables( pub async fn get_table( State(state): State, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, -) -> MetastoreAPIResult>> { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); +) -> MetastoreAPIResult>> { + let table_ident = TableIdent::new(&database_name, &schema_name, &table_name); match state.metastore.get_table(&table_ident).await { Ok(Some(table)) => Ok(Json(table)), Ok(None) => Err(MetastoreError::TableNotFound { @@ -465,10 +303,10 @@ pub async fn get_table( pub async fn create_table( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, - Json(table): Json, -) -> MetastoreAPIResult>> { + Json(table): Json, +) -> MetastoreAPIResult>> { table.validate().context(metastore_error::ValidationSnafu)?; - let table_ident = 
IceBucketTableIdent::new(&database_name, &schema_name, &table.ident.table); + let table_ident = TableIdent::new(&database_name, &schema_name, &table.ident.table); state .metastore .create_table(&table_ident, table) @@ -481,9 +319,9 @@ pub async fn create_table( pub async fn update_table( State(state): State, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, - Json(table): Json, -) -> MetastoreAPIResult>> { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); + Json(table): Json, +) -> MetastoreAPIResult>> { + let table_ident = TableIdent::new(&database_name, &schema_name, &table_name); state .metastore .update_table(&table_ident, table) @@ -498,7 +336,7 @@ pub async fn delete_table( Query(query): Query, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, ) -> MetastoreAPIResult<()> { - let table_ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); + let table_ident = TableIdent::new(&database_name, &schema_name, &table_name); state .metastore .delete_table(&table_ident, query.cascade.unwrap_or_default()) @@ -508,9 +346,9 @@ pub async fn delete_table( #[allow(clippy::needless_pass_by_value)] #[must_use] -pub fn hide_sensitive(volume: RwObject) -> RwObject { +pub fn hide_sensitive(volume: RwObject) -> RwObject { let mut new_volume = volume; - if let IceBucketVolumeType::S3(ref mut s3_volume) = new_volume.data.volume { + if let VolumeType::S3(ref mut s3_volume) = new_volume.data.volume { if let Some(AwsCredentials::AccessKey(ref mut access_key)) = s3_volume.credentials { access_key.aws_access_key_id = "******".to_string(); access_key.aws_secret_access_key = "******".to_string(); diff --git a/crates/runtime/src/http/metastore/models.rs b/crates/runtime/src/http/metastore/models.rs index e4e05918c..bf3882979 100644 --- a/crates/runtime/src/http/metastore/models.rs +++ b/crates/runtime/src/http/metastore/models.rs @@ -17,9 +17,9 @@ // These are API wrappers for the metastore models -use icebucket_metastore::models::*; +use embucket_metastore::models::*; use serde::{Deserialize, Serialize}; -use utoipa::{ToSchema}; +use utoipa::ToSchema; #[derive(Debug, Clone, PartialEq, Eq)] pub struct HTTPRwObject(pub RwObject); @@ -50,4 +50,3 @@ impl From> for Vec> { http_rw_objects.0.into_iter().map(RwObject::from).collect() } } - diff --git a/crates/runtime/src/http/mod.rs b/crates/runtime/src/http/mod.rs index 16ad23bb8..1c7adbe87 100644 --- a/crates/runtime/src/http/mod.rs +++ b/crates/runtime/src/http/mod.rs @@ -23,9 +23,9 @@ use axum::{ response::{IntoResponse, Response}, Router, }; +use embucket_history::store::WorksheetsStore; +use embucket_metastore::Metastore; use http_body_util::BodyExt; -use icebucket_history::store::WorksheetsStore; -use icebucket_metastore::Metastore; use std::sync::Arc; use time::Duration; use tokio::signal; @@ -54,13 +54,13 @@ pub mod utils; #[cfg(test)] mod tests; -use super::http::config::IceBucketWebConfig; +use super::http::config::WebConfig; #[allow(clippy::needless_pass_by_value)] -pub fn make_icebucket_app( +pub fn make_app( metastore: Arc, history: Arc, - config: &IceBucketWebConfig, + config: &WebConfig, ) -> Result> { let execution_cfg = execution::utils::Config::new(&config.data_format)?; let execution_svc = Arc::new(ExecutionService::new(metastore.clone(), execution_cfg)); @@ -95,10 +95,7 @@ pub fn make_icebucket_app( } #[allow(clippy::unwrap_used, clippy::as_conversions)] -pub async fn run_icebucket_app( - app: Router, - config: &IceBucketWebConfig, 
-) -> Result<(), Box> { +pub async fn run_app(app: Router, config: &WebConfig) -> Result<(), Box> { let host = config.host.clone(); let port = config.port; let listener = tokio::net::TcpListener::bind(format!("{host}:{port}")).await?; diff --git a/crates/runtime/src/http/state.rs b/crates/runtime/src/http/state.rs index 8d50eb773..84408e635 100644 --- a/crates/runtime/src/http/state.rs +++ b/crates/runtime/src/http/state.rs @@ -15,14 +15,14 @@ // specific language governing permissions and limitations // under the License. -use icebucket_history::store::WorksheetsStore; -use icebucket_metastore::metastore::Metastore; +use embucket_history::store::WorksheetsStore; +use embucket_metastore::metastore::Metastore; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::Mutex; use crate::execution::service::ExecutionService; -use crate::http::config::IceBucketWebConfig; +use crate::http::config::WebConfig; // Define a State struct that contains shared services or repositories #[derive(Clone)] @@ -31,7 +31,7 @@ pub struct AppState { pub history: Arc, pub execution_svc: Arc, pub dbt_sessions: Arc>>, - pub config: Arc, + pub config: Arc, } impl AppState { @@ -40,7 +40,7 @@ impl AppState { metastore: Arc, history: Arc, execution_svc: Arc, - config: Arc, + config: Arc, ) -> Self { Self { metastore, diff --git a/crates/runtime/src/http/tests/query.rs b/crates/runtime/src/http/tests/query.rs index 9e70a7833..bbdcfa8f6 100644 --- a/crates/runtime/src/http/tests/query.rs +++ b/crates/runtime/src/http/tests/query.rs @@ -17,15 +17,16 @@ #![allow(clippy::unwrap_used, clippy::expect_used)] -use crate::tests::run_icebucket_test_server; +use crate::tests::run_test_server; // for `collect` use crate::http::ui::{ queries::models::QueryCreateResponse, worksheets::models::{WorksheetCreatePayload, WorksheetCreateResponse}, }; use chrono::{TimeZone, Utc}; -use icebucket_metastore::{ - IceBucketDatabase, IceBucketSchema, IceBucketSchemaIdent, IceBucketVolume, +use embucket_metastore::{ + Database as MetastoreDatabase, Schema as MetastoreSchema, SchemaIdent as MetastoreSchemaIdent, + Volume as MetastoreVolume, }; use serde_json::json; @@ -44,13 +45,13 @@ fn get_patched_query_response(query_resp: &str) -> String { #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_parallel_queries() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; let client = reqwest::Client::new(); let client2 = reqwest::Client::new(); - let vol = IceBucketVolume { + let vol = MetastoreVolume { ident: "test_volume".to_string(), - volume: icebucket_metastore::IceBucketVolumeType::Memory, + volume: embucket_metastore::VolumeType::Memory, }; let _create_volume = client @@ -63,7 +64,7 @@ async fn test_parallel_queries() { .error_for_status_ref() .expect("Create volume wasn't 200"); - let db = IceBucketDatabase { + let db = MetastoreDatabase { ident: "benchmark".to_string(), volume: "test_volume".to_string(), properties: None, @@ -79,8 +80,8 @@ async fn test_parallel_queries() { .error_for_status_ref() .expect("Create database wasn't 200"); - let schema = IceBucketSchema { - ident: IceBucketSchemaIdent { + let schema = MetastoreSchema { + ident: MetastoreSchemaIdent { database: "benchmark".to_string(), schema: "public".to_string(), }, diff --git a/crates/runtime/src/http/tests/snapshots/icebucket_runtime__http__tests__query__parallel_queries.snap b/crates/runtime/src/http/tests/snapshots/embucket_runtime__http__tests__query__parallel_queries.snap similarity index 97% rename from 
crates/runtime/src/http/tests/snapshots/icebucket_runtime__http__tests__query__parallel_queries.snap rename to crates/runtime/src/http/tests/snapshots/embucket_runtime__http__tests__query__parallel_queries.snap index 19ed38adb..7066912d2 100644 --- a/crates/runtime/src/http/tests/snapshots/icebucket_runtime__http__tests__query__parallel_queries.snap +++ b/crates/runtime/src/http/tests/snapshots/embucket_runtime__http__tests__query__parallel_queries.snap @@ -1,6 +1,6 @@ --- source: crates/runtime/src/http/tests/query.rs -expression: "(get_patched_query_response(query1), get_patched_query_response(query2),)" +expression: "(get_patched_query_response(query1.as_str()),\nget_patched_query_response(query2.as_str()),)" --- ( "{\"id\":99999,\"worksheetId\":99999,\"query\":\"\\n CREATE TABLE benchmark.public.hits\\n (\\n WatchID BIGINT NOT NULL,\\n JavaEnable INTEGER NOT NULL,\\n Title TEXT NOT NULL,\\n GoodEvent INTEGER NOT NULL,\\n EventTime BIGINT NOT NULL,\\n EventDate INTEGER NOT NULL,\\n CounterID INTEGER NOT NULL,\\n ClientIP INTEGER NOT NULL,\\n PRIMARY KEY (CounterID, EventDate, EventTime, WatchID)\\n );\\n \",\"startTime\":\"2025-01-01T00:00:00Z\",\"endTime\":\"2025-01-01T00:00:00Z\",\"durationMs\":100,\"resultCount\":1,\"result\":{\"columns\":[{\"name\":\"count\",\"type\":\"fixed\"}],\"rows\":[[0]]},\"status\":\"Ok\",\"error\":\"\"}", diff --git a/crates/runtime/src/http/ui/databases/error.rs b/crates/runtime/src/http/ui/databases/error.rs index 54e85f7be..09c28a03c 100644 --- a/crates/runtime/src/http/ui/databases/error.rs +++ b/crates/runtime/src/http/ui/databases/error.rs @@ -19,8 +19,8 @@ use crate::http::error::ErrorResponse; use crate::http::ui::error::IntoStatusCode; use axum::response::IntoResponse; use axum::Json; +use embucket_metastore::error::MetastoreError; use http::StatusCode; -use icebucket_metastore::error::MetastoreError; use snafu::prelude::*; pub type DatabasesResult = Result; diff --git a/crates/runtime/src/http/ui/databases/handlers.rs b/crates/runtime/src/http/ui/databases/handlers.rs index 727c312c1..133f2017b 100644 --- a/crates/runtime/src/http/ui/databases/handlers.rs +++ b/crates/runtime/src/http/ui/databases/handlers.rs @@ -30,9 +30,9 @@ use axum::{ extract::{Path, Query, State}, Json, }; -use icebucket_metastore::error::MetastoreError; -use icebucket_metastore::IceBucketDatabase; -use icebucket_utils::list_config::ListConfig; +use embucket_metastore::error::MetastoreError; +use embucket_metastore::Database as MetastoreDatabase; +use embucket_utils::list_config::ListConfig; use utoipa::OpenApi; use validator::Validate; @@ -78,7 +78,7 @@ pub async fn create_database( State(state): State, Json(database): Json, ) -> DatabasesResult> { - let database: IceBucketDatabase = database.data.into(); + let database: MetastoreDatabase = database.data.into(); database.validate().map_err(|e| DatabasesAPIError::Create { source: MetastoreError::Validation { source: e }, })?; @@ -172,7 +172,7 @@ pub async fn update_database( Path(database_name): Path, Json(database): Json, ) -> DatabasesResult> { - let database: IceBucketDatabase = database.data.into(); + let database: MetastoreDatabase = database.data.into(); database.validate().map_err(|e| DatabasesAPIError::Update { source: MetastoreError::Validation { source: e }, })?; diff --git a/crates/runtime/src/http/ui/databases/models.rs b/crates/runtime/src/http/ui/databases/models.rs index 3b23a7112..2131114ae 100644 --- a/crates/runtime/src/http/ui/databases/models.rs +++ b/crates/runtime/src/http/ui/databases/models.rs @@ -15,7 
+15,7 @@ // specific language governing permissions and limitations // under the License. -use icebucket_metastore::models::IceBucketDatabase; +use embucket_metastore::models::Database as MetastoreDatabase; use serde::{Deserialize, Serialize}; use utoipa::{IntoParams, ToSchema}; @@ -25,8 +25,8 @@ pub struct Database { pub volume: String, } -impl From for Database { - fn from(db: IceBucketDatabase) -> Self { +impl From for Database { + fn from(db: MetastoreDatabase) -> Self { Self { name: db.ident, volume: db.volume, @@ -36,9 +36,9 @@ impl From for Database { // TODO: Remove it when found why it can't locate .into() if only From trait implemeted #[allow(clippy::from_over_into)] -impl Into for Database { - fn into(self) -> IceBucketDatabase { - IceBucketDatabase { +impl Into for Database { + fn into(self) -> MetastoreDatabase { + MetastoreDatabase { ident: self.name, volume: self.volume, properties: None, diff --git a/crates/runtime/src/http/ui/error.rs b/crates/runtime/src/http/ui/error.rs index eb0bf6cfa..86dea9a4e 100644 --- a/crates/runtime/src/http/ui/error.rs +++ b/crates/runtime/src/http/ui/error.rs @@ -29,7 +29,7 @@ pub enum UIError { }, #[snafu(transparent)] Metastore { - source: icebucket_metastore::error::MetastoreError, + source: embucket_metastore::error::MetastoreError, }, } pub type UIResult = Result; diff --git a/crates/runtime/src/http/ui/navigation_trees/error.rs b/crates/runtime/src/http/ui/navigation_trees/error.rs index 6948a9b38..8221658c4 100644 --- a/crates/runtime/src/http/ui/navigation_trees/error.rs +++ b/crates/runtime/src/http/ui/navigation_trees/error.rs @@ -19,8 +19,8 @@ use crate::http::error::ErrorResponse; use crate::http::ui::error::IntoStatusCode; use axum::response::IntoResponse; use axum::Json; +use embucket_metastore::error::MetastoreError; use http::StatusCode; -use icebucket_metastore::error::MetastoreError; use snafu::prelude::*; pub type NavigationTreesResult = Result; diff --git a/crates/runtime/src/http/ui/navigation_trees/handlers.rs b/crates/runtime/src/http/ui/navigation_trees/handlers.rs index 0ead921b6..bb1851c01 100644 --- a/crates/runtime/src/http/ui/navigation_trees/handlers.rs +++ b/crates/runtime/src/http/ui/navigation_trees/handlers.rs @@ -24,7 +24,7 @@ use crate::http::ui::navigation_trees::models::{ }; use axum::extract::Query; use axum::{extract::State, Json}; -use icebucket_utils::list_config::ListConfig; +use embucket_utils::list_config::ListConfig; use utoipa::OpenApi; #[derive(OpenApi)] diff --git a/crates/runtime/src/http/ui/old_handlers/common.rs b/crates/runtime/src/http/ui/old_handlers/common.rs deleted file mode 100644 index fa00e0570..000000000 --- a/crates/runtime/src/http/ui/old_handlers/common.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -use super::super::models::error::{self as model_error, NexusResult}; -use crate::http::ui::models::database::Database; -use crate::http::ui::models::storage_profile::StorageProfile; -use crate::http::ui::models::table::{Statistics, Table}; -use crate::http::ui::models::warehouse::Warehouse; -use crate::state::AppState; -use catalog::models::{Database as DatabaseModel, DatabaseIdent, TableIdent, WarehouseIdent}; -use control_plane::models::Warehouse as WarehouseModel; -use snafu::ResultExt; -use uuid::Uuid; - -impl AppState { - pub async fn get_warehouse_model(&self, warehouse_id: Uuid) -> NexusResult { - self.control_svc - .get_warehouse(warehouse_id) - .await - .context(model_error::WarehouseFetchSnafu { id: warehouse_id }) - } - - pub async fn get_warehouse_by_id(&self, warehouse_id: Uuid) -> NexusResult { - self.get_warehouse_model(warehouse_id) - .await - .map(std::convert::Into::into) - } - - pub async fn get_profile_by_id(&self, storage_profile_id: Uuid) -> NexusResult { - self.control_svc - .get_profile(storage_profile_id) - .await - .context(model_error::StorageProfileFetchSnafu { - id: storage_profile_id, - }) - .map(std::convert::Into::into) - } - - pub async fn get_database(&self, ident: &DatabaseIdent) -> NexusResult { - self.catalog_svc - .get_namespace(ident) - .await - .context(model_error::DatabaseFetchSnafu { id: ident.clone() }) - .map(std::convert::Into::into) - } - - #[allow( - clippy::cast_possible_truncation, - clippy::as_conversions, - clippy::cast_possible_wrap - )] - #[tracing::instrument(level = "debug", skip(self), err, ret(level = tracing::Level::TRACE))] - pub async fn list_warehouses(&self) -> NexusResult> { - let warehouses: Vec = self - .control_svc - .list_warehouses() - .await - .context(model_error::WarehouseListSnafu)? 
- .into_iter() - .map(std::convert::Into::into) - .collect(); - - let mut result = Vec::new(); - for mut warehouse in warehouses { - let profile = self.get_profile_by_id(warehouse.storage_profile_id).await?; - - let databases = self.list_databases(warehouse.id, profile.clone()).await?; - - let mut total_statistics = Statistics { - database_count: Some(databases.len() as i32), - ..Default::default() - }; - for database in &databases { - let stats = database.clone().statistics; - total_statistics = total_statistics.aggregate(&stats); - } - warehouse.storage_profile = profile; - warehouse.databases = databases; - warehouse.statistics = total_statistics; - result.push(warehouse); - } - Ok(result) - } - - #[tracing::instrument(level = "debug", skip(self), err, ret(level = tracing::Level::TRACE))] - pub async fn list_databases_models( - &self, - warehouse_id: Uuid, - ) -> NexusResult> { - self.catalog_svc - .list_namespaces(&WarehouseIdent::new(warehouse_id), None) - .await - .context(model_error::DatabaseModelListSnafu { id: warehouse_id }) - } - - #[tracing::instrument(level = "debug", skip(self), err, ret(level = tracing::Level::TRACE))] - pub async fn list_databases( - &self, - warehouse_id: Uuid, - profile: StorageProfile, - ) -> NexusResult> { - let ident = &WarehouseIdent::new(warehouse_id); - let databases = self - .catalog_svc - .list_namespaces(ident, None) - .await - .context(model_error::NamespaceListSnafu { id: warehouse_id })?; - - let mut database_entities = Vec::new(); - for database in databases { - let tables = self.list_tables(&database.ident).await?; - let mut entity = Database::from(database); - entity.with_details(warehouse_id, &profile, tables); - database_entities.push(entity); - } - Ok(database_entities) - } - - #[tracing::instrument(level = "debug", skip(self), err, ret(level = tracing::Level::TRACE))] - pub async fn list_tables(&self, ident: &DatabaseIdent) -> NexusResult> { - let tables = self - .catalog_svc - .list_tables(ident) - .await - .context(model_error::TableListSnafu { id: ident.clone() })? - .into_iter() - .map(Into::into) - .collect(); - Ok(tables) - } - - #[tracing::instrument(level = "debug", skip(self), err, ret(level = tracing::Level::TRACE))] - pub async fn get_table(&self, ident: &TableIdent) -> NexusResult
{ - let table = self - .catalog_svc - .load_table(ident) - .await - .context(model_error::TableFetchSnafu { id: ident.clone() })?; - Ok(table.into()) - } -} diff --git a/crates/runtime/src/http/ui/old_handlers/mod.rs b/crates/runtime/src/http/ui/old_handlers/mod.rs deleted file mode 100644 index bba821893..000000000 --- a/crates/runtime/src/http/ui/old_handlers/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -//pub mod common; -//pub mod tables; diff --git a/crates/runtime/src/http/ui/old_handlers/profiles.rs b/crates/runtime/src/http/ui/old_handlers/profiles.rs deleted file mode 100644 index d819efe38..000000000 --- a/crates/runtime/src/http/ui/old_handlers/profiles.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
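An illustrative aside, not part of the patch: the removed `AppState` helpers above roll per-database statistics up into warehouse-level totals before returning them. The following is a minimal, dependency-free sketch of that fold; `Stats`, its fields, and `roll_up` are stand-in names, not the real `Statistics` model.

```rust
// Sketch of the statistics roll-up performed by the removed list_warehouses helper.
#[derive(Clone, Copy, Default, Debug)]
struct Stats {
    database_count: i32,
    table_count: i32,
    total_bytes: i64,
}

impl Stats {
    // Combine two partial statistics into one total.
    fn aggregate(self, other: &Stats) -> Stats {
        Stats {
            database_count: self.database_count + other.database_count,
            table_count: self.table_count + other.table_count,
            total_bytes: self.total_bytes + other.total_bytes,
        }
    }
}

fn roll_up(per_database: &[Stats]) -> Stats {
    // Seed with the database count, then fold in each database's own numbers.
    let seed = Stats {
        database_count: per_database.len() as i32,
        ..Stats::default()
    };
    per_database.iter().fold(seed, |acc, s| acc.aggregate(s))
}

fn main() {
    let dbs = vec![
        Stats { database_count: 0, table_count: 3, total_bytes: 1_024 },
        Stats { database_count: 0, table_count: 1, total_bytes: 512 },
    ];
    println!("{:?}", roll_up(&dbs)); // database_count: 2, table_count: 4, total_bytes: 1536
}
```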
- -use super::super::models::error::{self as model_error, NexusError, NexusResult}; -use crate::http::ui::models::{aws, storage_profile}; -use crate::state::AppState; -use axum::{extract::Path, extract::State, Json}; -use control_plane::models::{StorageProfile, StorageProfileCreateRequest}; -use snafu::ResultExt; -use utoipa::OpenApi; -use uuid::Uuid; - -#[derive(OpenApi)] -#[openapi( - paths( - create_storage_profile, - get_storage_profile, - delete_storage_profile, - list_storage_profiles, - ), - components( - schemas( - storage_profile::CreateStorageProfilePayload, - storage_profile::StorageProfile, - aws::Credentials, - aws::AwsAccessKeyCredential, - aws::AwsRoleCredential, - aws::CloudProvider, - NexusError, - ) - ), - tags( - (name = "storage_profiles", description = "Storage profiles management endpoints.") - ) -)] -pub struct ApiDoc; - -#[utoipa::path( - post, - operation_id = "createStorageProfile", - tags = ["storage_profiles"], - path = "/ui/storage-profiles", - request_body = storage_profile::CreateStorageProfilePayload, - responses( - (status = 200, description = "Successful Response", body = storage_profile::StorageProfile), - (status = 400, description = "Bad request", body = NexusError), - (status = 422, description = "Unprocessable entity", body = NexusError), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn create_storage_profile( - State(state): State, - Json(payload): Json, -) -> NexusResult> { - let request: StorageProfileCreateRequest = payload.into(); - let profile: StorageProfile = state - .control_svc - .create_profile(&request) - .await - .context(model_error::StorageProfileCreateSnafu)?; - Ok(Json(profile.into())) -} - -#[utoipa::path( - get, - operation_id = "getStorageProfile", - tags = ["storage_profiles"], - path = "/ui/storage-profiles/{storageProfileId}", - params( - ("storageProfileId" = Uuid, Path, description = "Storage profile ID") - ), - responses( - (status = 200, description = "Successful Response", body = storage_profile::StorageProfile), - (status = 404, description = "Not found", body = NexusError), - (status = 422, description = "Unprocessable entity", body = NexusError), - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn get_storage_profile( - State(state): State, - Path(storage_profile_id): Path, -) -> NexusResult> { - let profile = state.get_profile_by_id(storage_profile_id).await?; - Ok(Json(profile)) -} - -#[utoipa::path( - delete, - operation_id = "deleteStorageProfile", - tags = ["storage_profiles"], - path = "/ui/storage-profiles/{storageProfileId}", - params( - ("storageProfileId" = Uuid, Path, description = "Storage profile ID") - ), - responses( - (status = 200, description = "Successful Response", body = storage_profile::StorageProfile), - (status = 404, description = "Not found", body = NexusError), - (status = 422, description = "Unprocessable entity", body = NexusError), - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn delete_storage_profile( - State(state): State, - Path(storage_profile_id): Path, -) -> NexusResult> { - state - .control_svc - .delete_profile(storage_profile_id) - .await - .context(model_error::StorageProfileDeleteSnafu { - id: storage_profile_id, - })?; - Ok(Json(())) -} - -#[utoipa::path( - get, - operation_id = "listStorageProfiles", - tags 
= ["storage_profiles"], - path = "/ui/storage-profiles", - responses( - (status = 200, body = Vec), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn list_storage_profiles( - State(state): State, -) -> NexusResult>> { - let profiles = state - .control_svc - .list_profiles() - .await - .context(model_error::StorageProfileListSnafu)?; - Ok(Json(profiles.into_iter().map(Into::into).collect())) -} diff --git a/crates/runtime/src/http/ui/old_handlers/tables.rs b/crates/runtime/src/http/ui/old_handlers/tables.rs deleted file mode 100644 index 321ad6162..000000000 --- a/crates/runtime/src/http/ui/old_handlers/tables.rs +++ /dev/null @@ -1,472 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -use super::super::models::error::{self as model_error, NexusError, NexusResult}; -use crate::http::ui::models::properties::{ - Properties, Property, TableSettingsResponse, TableSnapshotsResponse, - TableUpdatePropertiesPayload, -}; -use crate::http::ui::models::table::{ - Table, TableCreatePayload, TableRegisterRequest, TableUploadPayload, -}; -use crate::http::{session::DFSessionId, utils::get_default_properties}; -use crate::state::AppState; -use axum::{extract::Multipart, extract::Path, extract::State, Json}; -use catalog::models::{DatabaseIdent, TableIdent, WarehouseIdent}; -use iceberg::NamespaceIdent; -use snafu::ResultExt; -use utoipa::OpenApi; -use uuid::Uuid; - -#[derive(OpenApi)] -#[openapi( - paths( - create_table, - register_table, - delete_table, - get_table, - upload_data_to_table, - get_settings, - update_table_properties, - get_snapshots, - ), - components( - schemas( - TableCreatePayload, - TableRegisterRequest, - TableUploadPayload, - Table, - Properties, - Property, - NexusError, - ) - ), - tags( - (name = "tables", description = "Tables management endpoints.") - ) -)] -pub struct ApiDoc; - -#[utoipa::path( - get, - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/tables/{tableName}", - operation_id = "getTable", - tags = ["tables"], - params( - ("warehouseId" = Uuid, description = "Warehouse ID"), - ("databaseName" = String, description = "Database Name"), - ("tableName" = String, description = "Table name") - ), - responses( - (status = 200, description = "Get table", body = Table), - (status = 404, description = "Not found", body = NexusError), - (status = 422, description = "Unprocessable entity", body = NexusError), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn get_table( - State(state): State, - Path((warehouse_id, database_name, 
table_name)): Path<(Uuid, String, String)>, -) -> NexusResult> { - let warehouse = state.get_warehouse_by_id(warehouse_id).await?; - let profile = state - .get_profile_by_id(warehouse.storage_profile_id) - .await?; - let table_ident = TableIdent { - database: DatabaseIdent { - warehouse: WarehouseIdent::new(warehouse.id), - namespace: NamespaceIdent::from_vec( - database_name - .split('.') - .map(String::from) - .collect::>(), - ) - .context(model_error::MalformedNamespaceIdentSnafu)?, - }, - table: table_name, - }; - let mut table = state.get_table(&table_ident).await?; - table.with_details(warehouse_id, profile, database_name); - Ok(Json(table)) -} - -#[utoipa::path( - get, - operation_id = "createTable", - tags = ["tables"], - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/tables", - params( - ("warehouseId" = Uuid, description = "Warehouse ID"), - ("databaseName" = String, description = "Database Name"), - ), - responses( - (status = 200, description = "Successful Response", body = Table), - (status = 404, description = "Not found", body = NexusError), - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn create_table( - State(state): State, - Path((warehouse_id, database_name)): Path<(Uuid, String)>, - Json(payload): Json, -) -> NexusResult> { - let warehouse = state.get_warehouse_model(warehouse_id).await?; - let profile = state - .control_svc - .get_profile(warehouse.storage_profile_id) - .await - .context(model_error::StorageProfileFetchSnafu { - id: warehouse.storage_profile_id, - })?; - let db_ident = DatabaseIdent { - warehouse: WarehouseIdent::new(warehouse.id), - namespace: NamespaceIdent::from_vec( - database_name - .split('.') - .map(String::from) - .collect::>(), - ) - .context(model_error::MalformedNamespaceIdentSnafu)?, - }; - let table = state - .catalog_svc - .create_table( - &db_ident, - &profile, - &warehouse, - payload.into(), - Option::from(get_default_properties()), - ) - .await - .context(model_error::TableCreateSnafu)?; - let mut table: Table = table.into(); - table.with_details(warehouse_id, profile.into(), database_name); - Ok(Json(table)) -} - -#[utoipa::path( - get, - operation_id = "registerTable", - tags = ["tables"], - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/register", - params( - ("warehouseId" = Uuid, description = "Warehouse ID"), - ("databaseName" = String, description = "Database Name"), - ), - responses( - (status = 200, description = "Successful Response", body = Table), - (status = 404, description = "Not found", body = NexusError), - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn register_table( - State(state): State, - Path((warehouse_id, database_name)): Path<(Uuid, String)>, - Json(payload): Json, -) -> NexusResult> { - let warehouse = state.get_warehouse_model(warehouse_id).await?; - let profile = state - .control_svc - .get_profile(warehouse.storage_profile_id) - .await - .context(model_error::StorageProfileFetchSnafu { - id: warehouse.storage_profile_id, - })?; - let db_ident = DatabaseIdent { - warehouse: WarehouseIdent::new(warehouse.id), - namespace: NamespaceIdent::from_vec( - database_name - .split('.') - .map(String::from) - .collect::>(), - ) - .context(model_error::MalformedNamespaceIdentSnafu)?, - }; - let table = state - .catalog_svc - .register_table( - &db_ident, - &profile, - payload.name, - payload.metadata_location, - Option::from(get_default_properties()), - ) 
- .await - .context(model_error::TableRegisterSnafu)?; - let mut table: Table = table.into(); - table.with_details(warehouse_id, profile.into(), database_name); - Ok(Json(table)) -} - -#[utoipa::path( - delete, - operation_id = "deleteTable", - tags = ["tables"], - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/tables/{tableName}", - params( - ("warehouseId" = Uuid, description = "Warehouse ID"), - ("databaseName" = Uuid, description = "Database Name"), - ("tableName" = Uuid, description = "Table name") - ), - responses( - (status = 200, description = "Successful Response"), - (status = 404, description = "Not found", body=NexusError), - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn delete_table( - State(state): State, - Path((warehouse_id, database_name, table_name)): Path<(Uuid, String, String)>, -) -> NexusResult<()> { - let warehouse = state.get_warehouse_by_id(warehouse_id).await?; - let table_ident = TableIdent { - database: DatabaseIdent { - warehouse: WarehouseIdent::new(warehouse.id), - namespace: NamespaceIdent::from_vec( - database_name - .split('.') - .map(String::from) - .collect::>(), - ) - .context(model_error::MalformedNamespaceIdentSnafu)?, - }, - table: table_name, - }; - state - .catalog_svc - .drop_table(&table_ident) - .await - .context(model_error::TableDeleteSnafu)?; - Ok(()) -} - -#[utoipa::path( - get, - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/tables/{tableName}/settings", - operation_id = "getTableSettings", - tags = ["tables"], - params( - ("warehouseId" = Uuid, description = "Warehouse ID"), - ("databaseName" = String, description = "Database Name"), - ("tableName" = String, description = "Table name") - ), - responses( - (status = 200, description = "Get table", body = TableSettingsResponse), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn get_settings( - State(state): State, - Path((warehouse_id, database_name, table_name)): Path<(Uuid, String, String)>, -) -> NexusResult> { - let warehouse = state.get_warehouse_by_id(warehouse_id).await?; - let profile = state - .get_profile_by_id(warehouse.storage_profile_id) - .await?; - let table_ident = TableIdent { - database: DatabaseIdent { - warehouse: WarehouseIdent::new(warehouse.id), - namespace: NamespaceIdent::from_vec( - database_name - .split('.') - .map(String::from) - .collect::>(), - ) - .context(model_error::MalformedNamespaceIdentSnafu)?, - }, - table: table_name, - }; - let mut table = state.get_table(&table_ident).await?; - table.with_details(warehouse_id, profile, database_name); - Ok(Json(table.try_into()?)) -} - -#[utoipa::path( - post, - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/tables/{tableName}/settings", - operation_id = "updateTableProperties", - tags = ["tables"], - request_body = TableUpdatePropertiesPayload, - params( - ("warehouseId" = Uuid, description = "Warehouse ID"), - ("databaseName" = String, description = "Database Name"), - ("tableName" = String, description = "Table name") - ), - responses( - (status = 200, description = "Get table", body = Table), - (status = 404, description = "Not found", body = NexusError), - (status = 422, description = "Unprocessable entity", body = NexusError), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", 
skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn update_table_properties( - State(state): State, - Path((warehouse_id, database_name, table_name)): Path<(Uuid, String, String)>, - Json(payload): Json, -) -> NexusResult> { - let warehouse = state - .control_svc - .get_warehouse(warehouse_id) - .await - .context(model_error::WarehouseFetchSnafu { id: warehouse_id })?; - let profile = state - .control_svc - .get_profile(warehouse.storage_profile_id) - .await - .context(model_error::StorageProfileFetchSnafu { - id: warehouse.storage_profile_id, - })?; - let table_ident = TableIdent { - database: DatabaseIdent { - warehouse: WarehouseIdent::new(warehouse.id), - namespace: NamespaceIdent::from_vec( - database_name - .split('.') - .map(String::from) - .collect::>(), - ) - .context(model_error::MalformedNamespaceIdentSnafu)?, - }, - table: table_name, - }; - let table = state.get_table(&table_ident).await?; - let updated_table = state - .catalog_svc - .update_table( - &profile, - &warehouse, - payload.to_commit(&table, &table_ident), - ) - .await - .context(model_error::TablePropertiesUpdateSnafu)?; - let mut table: Table = updated_table.into(); - table.with_details(warehouse_id, profile.into(), database_name); - Ok(Json(table)) -} - -#[utoipa::path( - post, - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/tables/{tableName}/upload", - operation_id = "tableUpload", - tags = ["tables"], - params( - ("warehouseId" = Uuid, Path, description = "Warehouse ID"), - ("databaseName" = Uuid, Path, description = "Database Name"), - ("tableName" = Uuid, Path, description = "Table name") - ), - request_body( - content = TableUploadPayload, - content_type = "multipart/form-data", - description = "Upload data to the table in multipart/form-data format" - ), - responses( - (status = 200, description = "Successful Response"), - (status = 422, description = "Unprocessable entity", body = NexusError), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state, multipart), err, ret(level = tracing::Level::TRACE))] -pub async fn upload_data_to_table( - DFSessionId(session_id): DFSessionId, - State(state): State, - Path((warehouse_id, database_name, table_name)): Path<(Uuid, String, String)>, - mut multipart: Multipart, -) -> NexusResult<()> { - loop { - let next_field = multipart - .next_field() - .await - .context(model_error::MalformedMultipartSnafu)?; - match next_field { - Some(field) => { - if field.name().ok_or(NexusError::MalformedFileUploadRequest)? != "uploadFile" { - continue; - } - let file_name = field - .file_name() - .ok_or(NexusError::MalformedFileUploadRequest)? 
- .to_string(); - let data = field - .bytes() - .await - .context(model_error::MalformedMultipartSnafu)?; - - state - .control_svc - .upload_data_to_table( - &session_id, - &warehouse_id, - &database_name, - &table_name, - data, - file_name, - ) - .await - .context(model_error::DataUploadSnafu)?; - } - None => { - break; - } - } - } - Ok(()) -} - -#[utoipa::path( - get, - path = "/ui/warehouses/{warehouseId}/databases/{databaseName}/tables/{tableName}/snapshots", - operation_id = "getTableSnapshots", - tags = ["tables"], - params( - ("warehouseId" = Uuid, description = "Warehouse ID"), - ("databaseName" = String, description = "Database Name"), - ("tableName" = String, description = "Table name") - ), - responses( - (status = 200, description = "Get table", body = TableSnapshotsResponse), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn get_snapshots( - State(state): State, - Path((warehouse_id, database_name, table_name)): Path<(Uuid, String, String)>, -) -> NexusResult> { - let warehouse = state.get_warehouse_by_id(warehouse_id).await?; - let profile = state - .get_profile_by_id(warehouse.storage_profile_id) - .await?; - let table_ident = TableIdent { - database: DatabaseIdent { - warehouse: WarehouseIdent::new(warehouse.id), - namespace: NamespaceIdent::from_vec( - database_name - .split('.') - .map(String::from) - .collect::>(), - ) - .context(model_error::MalformedNamespaceIdentSnafu)?, - }, - table: table_name, - }; - let mut table = state.get_table(&table_ident).await?; - table.with_details(warehouse_id, profile, database_name); - Ok(Json(table.try_into()?)) -} diff --git a/crates/runtime/src/http/ui/old_handlers/warehouses.rs b/crates/runtime/src/http/ui/old_handlers/warehouses.rs deleted file mode 100644 index 74056e85b..000000000 --- a/crates/runtime/src/http/ui/old_handlers/warehouses.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
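An illustrative aside, not part of the patch: every removed table handler rebuilt a namespace path by splitting the dotted `databaseName` path segment. A dependency-free sketch of that split is below; `namespace_parts` is a stand-in name, and the real code fed the resulting parts into `NamespaceIdent::from_vec`.

```rust
// Split a dotted database name into namespace parts, as the removed handlers did.
fn namespace_parts(database_name: &str) -> Vec<String> {
    database_name.split('.').map(String::from).collect()
}

fn main() {
    assert_eq!(namespace_parts("analytics"), vec!["analytics"]);
    assert_eq!(
        namespace_parts("analytics.daily.events"),
        vec!["analytics", "daily", "events"]
    );
    println!("ok");
}
```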
- -use super::super::models::error::{self as model_error, NexusError, NexusResult}; -use crate::http::ui::models::table::Statistics; -use crate::http::ui::models::warehouse::{ - CreateWarehousePayload, Navigation, Warehouse, WarehousesDashboard, -}; -use crate::state::AppState; -use axum::{extract::Path, extract::State, Json}; -use control_plane::models::{Warehouse as WarehouseModel, WarehouseCreateRequest}; -use snafu::ResultExt; -use utoipa::OpenApi; -use uuid::Uuid; - -#[derive(OpenApi)] -#[openapi( - paths( - navigation, - get_warehouse, - list_warehouses, - create_warehouse, - delete_warehouse, - ), - components( - schemas( - CreateWarehousePayload, - Warehouse, - Navigation, - NexusError, - ) - ), - tags( - (name = "warehouses", description = "Warehouse management endpoints.") - ) -)] -pub struct ApiDoc; - -#[utoipa::path( - get, - path = "/ui/navigation_trees", - tags = ["warehouses"], - operation_id = "warehousesNavigation", - responses( - (status = 200, description = "List all warehouses fot navigation_trees", body = Navigation), - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn navigation(State(state): State) -> NexusResult> { - let warehouses = state.list_warehouses().await?; - Ok(Json(Navigation { warehouses })) -} -#[utoipa::path( - get, - path = "/ui/warehouses", - tags = ["warehouses"], - operation_id = "warehousesDashboard", - responses( - (status = 200, description = "List all warehouses", body = WarehousesDashboard), - (status = 500, description = "List all warehouses error", body = NexusError), - - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn list_warehouses( - State(state): State, -) -> NexusResult> { - let warehouses = state.list_warehouses().await?; - - let mut total_statistics = Statistics::default(); - let dashboards = warehouses - .into_iter() - .inspect(|warehouse| { - total_statistics = total_statistics.aggregate(&warehouse.statistics); - }) - .collect(); - - Ok(Json(WarehousesDashboard { - warehouses: dashboards, - statistics: total_statistics, - compaction_summary: None, - })) -} - -#[utoipa::path( - get, - path = "/ui/warehouses/{warehouseId}", - operation_id = "getWarehouse", - tags = ["warehouses"], - params( - ("warehouseId" = Uuid, Path, description = "Warehouse ID") - ), - responses( - (status = 200, description = "Warehouse found", body = Warehouse), - (status = 404, description = "Warehouse not found", body = NexusError) - ) -)] -#[allow( - clippy::cast_possible_wrap, - clippy::cast_possible_truncation, - clippy::as_conversions -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn get_warehouse( - State(state): State, - Path(warehouse_id): Path, -) -> NexusResult> { - let mut warehouse = state.get_warehouse_by_id(warehouse_id).await?; - let profile = state - .get_profile_by_id(warehouse.storage_profile_id) - .await?; - let databases = state.list_databases(warehouse_id, profile.clone()).await?; - - let mut total_statistics = Statistics { - database_count: Some(databases.len() as i32), - ..Default::default() - }; - for database in &databases { - let stats = database.clone().statistics; - total_statistics = total_statistics.aggregate(&stats); - } - warehouse.storage_profile = profile; - warehouse.databases = databases; - warehouse.statistics = total_statistics; - Ok(Json(warehouse)) -} - -#[utoipa::path( - post, - path = "/ui/warehouses", - request_body = 
CreateWarehousePayload, - operation_id = "createWarehouse", - tags = ["warehouses"], - responses( - (status = 201, description = "Warehouse created", body = Warehouse), - (status = 422, description = "Unprocessable Entity", body = NexusError), - (status = 500, description = "Internal server error", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn create_warehouse( - State(state): State, - Json(payload): Json, -) -> NexusResult> { - let request: WarehouseCreateRequest = payload.into(); - let profile = state.get_profile_by_id(request.storage_profile_id).await?; - let warehouses = state - .control_svc - .list_warehouses() - .await - .context(model_error::WarehouseListSnafu)?; - - if warehouses.iter().any(|w| w.name == request.name) { - return Err(NexusError::WarehouseAlreadyExists { - name: request.name.clone(), - }); - } - let warehouse: WarehouseModel = state - .control_svc - .create_warehouse(&request) - .await - .context(model_error::WarehouseCreateSnafu)?; - let mut warehouse: Warehouse = warehouse.into(); - warehouse.storage_profile = profile; - Ok(Json(warehouse)) -} - -#[utoipa::path( - delete, - path = "/ui/warehouses/{warehouseId}", - operation_id = "deleteWarehouse", - tags = ["warehouses"], - params( - ("warehouseId" = Uuid, Path, description = "Warehouse ID") - ), - responses( - (status = 204, description = "Warehouse deleted"), - (status = 404, description = "Warehouse not found", body = NexusError) - ) -)] -#[tracing::instrument(level = "debug", skip(state), err, ret(level = tracing::Level::TRACE))] -pub async fn delete_warehouse( - State(state): State, - Path(warehouse_id): Path, -) -> NexusResult> { - state - .control_svc - .delete_warehouse(warehouse_id) - .await - .context(model_error::WarehouseDeleteSnafu { id: warehouse_id })?; - Ok(Json(())) -} diff --git a/crates/runtime/src/http/ui/old_models/aws.rs b/crates/runtime/src/http/ui/old_models/aws.rs deleted file mode 100644 index 8cb678182..000000000 --- a/crates/runtime/src/http/ui/old_models/aws.rs +++ /dev/null @@ -1,199 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -use control_plane::models; -use serde::ser::SerializeStruct; -use serde::{Deserialize, Serialize, Serializer}; -use utoipa::ToSchema; -use validator::Validate; - -#[derive(Debug, Clone, PartialEq, Eq, Default, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct AwsAccessKeyCredential { - #[validate(length(min = 1))] - pub aws_access_key_id: String, - #[validate(length(min = 1))] - pub aws_secret_access_key: String, -} - -impl AwsAccessKeyCredential { - #[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(aws_access_key_id: String, aws_secret_access_key: String) -> Self { - Self { - aws_access_key_id, - aws_secret_access_key, - } - } -} - -impl Serialize for AwsAccessKeyCredential { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut state = serializer.serialize_struct("AwsAccessKeyCredential", 2)?; - state.serialize_field("awsAccessKeyId", &self.aws_access_key_id)?; - state.serialize_field("awsSecretAccessKey", &"********")?; - state.end() - } -} - -impl From for models::AwsAccessKeyCredential { - fn from(credential: AwsAccessKeyCredential) -> Self { - Self { - aws_access_key_id: credential.aws_access_key_id, - aws_secret_access_key: credential.aws_secret_access_key, - } - } -} -impl From for AwsAccessKeyCredential { - fn from(credential: models::AwsAccessKeyCredential) -> Self { - Self { - aws_access_key_id: credential.aws_access_key_id, - aws_secret_access_key: credential.aws_secret_access_key, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct AwsRoleCredential { - #[validate(length(min = 1))] - pub role_arn: String, - #[validate(length(min = 1))] - pub external_id: String, -} - -impl AwsRoleCredential { - #[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(role_arn: String, external_id: String) -> Self { - Self { - role_arn, - external_id, - } - } -} - -impl From for models::AwsRoleCredential { - fn from(credential: AwsRoleCredential) -> Self { - Self { - role_arn: credential.role_arn, - external_id: credential.external_id, - } - } -} -impl From for AwsRoleCredential { - fn from(credential: models::AwsRoleCredential) -> Self { - Self { - role_arn: credential.role_arn, - external_id: credential.external_id, - } - } -} - -#[derive( - Debug, - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Serialize, - Deserialize, - Hash, - Default, - ToSchema, -)] -#[serde(rename_all = "camelCase")] -pub enum CloudProvider { - #[serde(rename = "aws")] - #[default] - AWS, - #[serde(rename = "gcs")] - GCS, - #[serde(rename = "azure")] - AZURE, - #[serde(rename = "fs")] - FS, -} - -impl From for CloudProvider { - fn from(provider: models::CloudProvider) -> Self { - match provider { - models::CloudProvider::AWS => Self::AWS, - models::CloudProvider::GCS => Self::GCS, - models::CloudProvider::AZURE => Self::AZURE, - models::CloudProvider::FS => Self::FS, - } - } -} - -impl From for models::CloudProvider { - fn from(provider: CloudProvider) -> Self { - match provider { - CloudProvider::AWS => Self::AWS, - CloudProvider::GCS => Self::GCS, - CloudProvider::AZURE => Self::AZURE, - CloudProvider::FS => Self::FS, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)] -#[serde(rename_all(serialize = "camelCase", deserialize = "camelCase"))] -#[serde(untagged)] -pub enum Credentials { - AccessKey(AwsAccessKeyCredential), - Role(AwsRoleCredential), -} - -impl Default for 
Credentials { - fn default() -> Self { - Self::AccessKey(AwsAccessKeyCredential::default()) - } -} -impl From for Credentials { - fn from(credentials: models::Credentials) -> Self { - match credentials { - models::Credentials::AccessKey(creds) => Self::AccessKey(AwsAccessKeyCredential::new( - creds.aws_access_key_id, - creds.aws_secret_access_key, - )), - models::Credentials::Role(creds) => { - Self::Role(AwsRoleCredential::new(creds.role_arn, creds.external_id)) - } - } - } -} - -impl From for models::Credentials { - fn from(credentials: Credentials) -> Self { - match credentials { - Credentials::AccessKey(creds) => Self::AccessKey(models::AwsAccessKeyCredential { - aws_access_key_id: creds.aws_access_key_id, - aws_secret_access_key: creds.aws_secret_access_key, - }), - Credentials::Role(creds) => Self::Role(models::AwsRoleCredential { - role_arn: creds.role_arn, - external_id: creds.external_id, - }), - } - } -} diff --git a/crates/runtime/src/http/ui/old_models/database.rs b/crates/runtime/src/http/ui/old_models/database.rs deleted file mode 100644 index 3bd615e59..000000000 --- a/crates/runtime/src/http/ui/old_models/database.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -use crate::http::ui::models::storage_profile::StorageProfile; -use crate::http::ui::models::table::{Statistics, Table}; -use catalog::models; -use chrono::DateTime; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use utoipa::ToSchema; -use uuid::Uuid; -use validator::Validate; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CreateDatabasePayload { - pub name: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub properties: Option>, -} - -impl CreateDatabasePayload { - #[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(name: String) -> Self { - Self { - name, - properties: None, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct Database { - pub id: Uuid, - pub name: String, - pub tables: Vec
, - pub storage_profile: StorageProfile, - pub properties: HashMap, - pub warehouse_id: Uuid, - pub statistics: Statistics, - #[serde(skip_serializing_if = "Option::is_none")] - pub compaction_summary: Option, - pub created_at: DateTime, - pub updated_at: DateTime, -} - -impl Database { - pub fn with_details( - &mut self, - warehouse_id: Uuid, - profile: &StorageProfile, - mut tables: Vec
, - ) { - self.storage_profile = profile.clone(); - - let mut total_statistics = Statistics::default(); - for t in &mut tables { - t.with_details(warehouse_id, profile.clone(), self.name.clone()); - total_statistics = total_statistics.aggregate(&t.statistics); - } - total_statistics.database_count = Some(1); - - self.statistics = total_statistics; - self.warehouse_id = warehouse_id; - self.tables = tables; - - if let Some(created_at) = self.properties.get("created_at") { - if let Ok(created_at) = chrono::DateTime::parse_from_rfc3339(created_at) { - self.created_at = DateTime::from(created_at); - } - } - if let Some(updated_at) = self.properties.get("updated_at") { - if let Ok(updated_at) = chrono::DateTime::parse_from_rfc3339(updated_at) { - self.updated_at = DateTime::from(updated_at); - } - } - } -} - -impl From for Database { - fn from(db: models::Database) -> Self { - Self { - id: get_database_id(&db.ident), - name: db.ident.namespace.join("."), - tables: vec![], - warehouse_id: Uuid::default(), - created_at: DateTime::default(), - updated_at: DateTime::default(), - statistics: Statistics::default(), - compaction_summary: None, - properties: db.properties, - storage_profile: StorageProfile::default(), - } - } -} - -#[must_use] -pub fn get_database_id(ident: &models::DatabaseIdent) -> Uuid { - Uuid::new_v5(&Uuid::NAMESPACE_DNS, ident.namespace.join("__").as_bytes()) -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CompactionSummary { - pub compactions: i32, - pub starting_files: i32, - pub rewritten_files: i32, - pub file_percent: i32, - pub starting_size: i32, - pub rewritten_size: i32, - pub size_change: i32, - pub size_percent: i32, -} - -impl CompactionSummary { - #[allow(clippy::new_without_default, clippy::too_many_arguments)] - #[must_use] - pub const fn new( - compactions: i32, - starting_files: i32, - rewritten_files: i32, - file_percent: i32, - starting_size: i32, - rewritten_size: i32, - size_change: i32, - size_percent: i32, - ) -> Self { - Self { - compactions, - starting_files, - rewritten_files, - file_percent, - starting_size, - rewritten_size, - size_change, - size_percent, - } - } -} diff --git a/crates/runtime/src/http/ui/old_models/error.rs b/crates/runtime/src/http/ui/old_models/error.rs deleted file mode 100644 index b5c292a4d..000000000 --- a/crates/runtime/src/http/ui/old_models/error.rs +++ /dev/null @@ -1,211 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
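An illustrative aside, not part of the patch: the removed `get_database_id` helper above derives a stable UUID from the namespace path with a v5 (name-based) UUID, so the same namespace always maps to the same id. A small sketch of that scheme, assuming the `uuid` crate with the `v5` feature; `database_id` is a stand-in name.

```rust
use uuid::Uuid;

// Deterministic id: identical namespaces yield identical UUIDs across processes.
fn database_id(namespace: &[&str]) -> Uuid {
    Uuid::new_v5(&Uuid::NAMESPACE_DNS, namespace.join("__").as_bytes())
}

fn main() {
    let a = database_id(&["analytics", "daily"]);
    let b = database_id(&["analytics", "daily"]);
    assert_eq!(a, b); // stable across calls
    println!("{a}");
}
```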
- -use axum::http::StatusCode; -use axum::response::{IntoResponse, Response}; -use catalog::error::CatalogError; -use catalog::models::{DatabaseIdent, TableIdent}; -use control_plane::error::ControlPlaneError; -use snafu::prelude::*; -use utoipa::{PartialSchema, ToSchema}; -use uuid::Uuid; - -#[derive(Snafu, Debug)] -#[snafu(visibility(pub(crate)))] -pub enum NexusError { - #[snafu(display("Failed to fetch warehouse with id {id}"))] - WarehouseFetch { id: Uuid, source: ControlPlaneError }, - - #[snafu(display("Failed to fetch storage profile with id {id}"))] - StorageProfileFetch { id: Uuid, source: ControlPlaneError }, - - #[snafu(display("Failed to fetch database with id {id}"))] - DatabaseFetch { - id: DatabaseIdent, - source: CatalogError, - }, - - #[snafu(display("Failed to list warehouses"))] - WarehouseList { source: ControlPlaneError }, - - #[snafu(display("Failed to list database models for warehouse with id {id}"))] - DatabaseModelList { id: Uuid, source: CatalogError }, - - #[snafu(display("Failed to list namespaces for warehouse with id {id}"))] - NamespaceList { id: Uuid, source: CatalogError }, - - #[snafu(display("Failed to list tables for database with id {id}"))] - TableList { - id: DatabaseIdent, - source: CatalogError, - }, - - #[snafu(display("Failed to create table"))] - TableCreate { source: CatalogError }, - - #[snafu(display("Failed to fetch table with id {id}"))] - TableFetch { - id: TableIdent, - source: CatalogError, - }, - - #[snafu(display("Failed to delete table"))] - TableDelete { source: CatalogError }, - - #[snafu(display("Failed to register table"))] - TableRegister { source: CatalogError }, - - #[snafu(display("Database with name {name} already exists"))] - DatabaseAlreadyExists { name: String }, - - #[snafu(display("Failed to create database with ident {ident}"))] - DatabaseCreate { - ident: DatabaseIdent, - source: CatalogError, - }, - - #[snafu(display("Failed to delete database with ident {ident}"))] - DatabaseDelete { - ident: DatabaseIdent, - source: CatalogError, - }, - - #[snafu(display("Failed to create storage profile"))] - StorageProfileCreate { source: ControlPlaneError }, - - #[snafu(display("Failed to delete storage profile with id {id}"))] - StorageProfileDelete { id: Uuid, source: ControlPlaneError }, - - #[snafu(display("Failed to list storage profiles"))] - StorageProfileList { source: ControlPlaneError }, - - #[snafu(display("Query error: {source}"))] - Query { source: ControlPlaneError }, - - #[snafu(display("Failed to update table properties"))] - TablePropertiesUpdate { source: CatalogError }, - - #[snafu(display("Failed to upload data to table: {source}"))] - DataUpload { source: ControlPlaneError }, - - #[snafu(display("Failed to create warehouse"))] - WarehouseCreate { source: ControlPlaneError }, - - #[snafu(display("Warehouse already exists with name {name}"))] - WarehouseAlreadyExists { name: String }, - - #[snafu(display("Failed to delete warehouse with id {id}"))] - WarehouseDelete { id: Uuid, source: ControlPlaneError }, - - #[snafu(display("Malformed namespace ident"))] - MalformedNamespaceIdent { source: iceberg::Error }, - - #[snafu(display("Invalid Iceberg snapshot timestamp"))] - InvalidIcebergSnapshotTimestamp { source: iceberg::Error }, - - #[snafu(display("Malformed multipart message"))] - MalformedMultipart { - source: axum::extract::multipart::MultipartError, - }, - - #[snafu(display("Malformed file upload request"))] - MalformedFileUploadRequest, - - #[snafu(display("Failed to parse table metadata"))] - 
ParseTableMetadata { - source: Box, - field: String, - }, - - #[snafu(display("Missing Session ID"))] - MissingSessionId, -} - -pub type NexusResult = std::result::Result; - -impl ToSchema for NexusError {} - -impl PartialSchema for NexusError { - fn schema() -> utoipa::openapi::RefOr { - utoipa::openapi::ObjectBuilder::new() - .property( - "error", - utoipa::openapi::ObjectBuilder::new().schema_type(utoipa::openapi::Type::String), - ) - .required("error") - .into() - } -} - -impl IntoResponse for NexusError { - fn into_response(self) -> Response { - let status = match &self { - Self::StorageProfileFetch { id: _, source } - | Self::WarehouseFetch { id: _, source } - | Self::StorageProfileDelete { id: _, source } => match source { - ControlPlaneError::StorageProfileNotFound { .. } => StatusCode::NOT_FOUND, - _ => StatusCode::INTERNAL_SERVER_ERROR, - }, - Self::DatabaseFetch { .. } - | Self::WarehouseList { .. } - | Self::DatabaseModelList { .. } - | Self::NamespaceList { .. } - | Self::TableList { .. } - | Self::DatabaseCreate { .. } - | Self::StorageProfileCreate { .. } - | Self::Query { .. } - | Self::StorageProfileList { .. } - | Self::DataUpload { .. } - | Self::WarehouseCreate { .. } - | Self::InvalidIcebergSnapshotTimestamp { .. } - | Self::ParseTableMetadata { .. } => StatusCode::INTERNAL_SERVER_ERROR, - - Self::DatabaseAlreadyExists { .. } | Self::WarehouseAlreadyExists { .. } => { - StatusCode::CONFLICT - } - - Self::MalformedNamespaceIdent { .. } - | Self::MalformedMultipart { .. } - | Self::MalformedFileUploadRequest - | Self::MissingSessionId => StatusCode::BAD_REQUEST, - - Self::TableFetch { id: _, source } - | Self::TableDelete { source } - | Self::TablePropertiesUpdate { source } => match source { - CatalogError::TableNotFound { .. } => StatusCode::NOT_FOUND, - _ => StatusCode::INTERNAL_SERVER_ERROR, - }, - - Self::TableCreate { source } | Self::TableRegister { source } => match source { - CatalogError::TableAlreadyExists { .. } => StatusCode::CONFLICT, - _ => StatusCode::INTERNAL_SERVER_ERROR, - }, - - Self::DatabaseDelete { ident: _, source } => match source { - CatalogError::DatabaseNotFound { .. } => StatusCode::NOT_FOUND, - _ => StatusCode::INTERNAL_SERVER_ERROR, - }, - - Self::WarehouseDelete { id: _, source } => match source { - ControlPlaneError::WarehouseNotFound { .. } => StatusCode::NOT_FOUND, - _ => StatusCode::INTERNAL_SERVER_ERROR, - }, - }; - - (status, self.to_string()).into_response() - } -} diff --git a/crates/runtime/src/http/ui/old_models/mod.rs b/crates/runtime/src/http/ui/old_models/mod.rs deleted file mode 100644 index b248758bc..000000000 --- a/crates/runtime/src/http/ui/old_models/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
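An illustrative aside, not part of the patch: the removed `IntoResponse` impl above maps each error variant (sometimes inspecting its source) to an HTTP status. The dependency-free sketch below shows the same shape with stand-in variants and plain `u16` codes instead of axum's `StatusCode`.

```rust
// Map domain errors to HTTP status codes, mirroring the removed IntoResponse match.
#[derive(Debug)]
enum ApiError {
    NotFound,
    AlreadyExists,
    MalformedRequest,
    Internal,
}

fn status_for(err: &ApiError) -> u16 {
    match err {
        ApiError::NotFound => 404,
        ApiError::AlreadyExists => 409,
        ApiError::MalformedRequest => 400,
        ApiError::Internal => 500,
    }
}

fn main() {
    assert_eq!(status_for(&ApiError::AlreadyExists), 409);
    println!("NotFound -> {}", status_for(&ApiError::NotFound));
}
```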
diff --git a/crates/runtime/src/http/ui/old_models/properties.rs b/crates/runtime/src/http/ui/old_models/properties.rs deleted file mode 100644 index 67b73d2f6..000000000 --- a/crates/runtime/src/http/ui/old_models/properties.rs +++ /dev/null @@ -1,430 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -use super::error::{self as model_error, NexusError, NexusResult}; -use crate::http::ui::models::table::Table; -use catalog::models::{TableCommit, TableIdent}; -use chrono::{DateTime, Utc}; -use iceberg::spec::Operation; -use iceberg::TableUpdate; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; -use std::collections::HashMap; -use utoipa::ToSchema; -use validator::Validate; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct TableSnapshotsResponse { - snapshots: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct TableSnapshot { - pub timestamp: DateTime, - pub operation: String, - pub total_records: i32, - pub added_records: i32, - pub deleted_records: i32, - pub snapshot_id: i64, -} - -impl TryFrom
for TableSnapshotsResponse { - type Error = NexusError; - - fn try_from(table: Table) -> NexusResult { - let mut snapshots = vec![]; - // Sort the snapshots by timestamp - - for (_, snapshot) in table.metadata.0.snapshots { - let operation = match snapshot.summary().operation { - Operation::Append => "append", - Operation::Overwrite => "overwrite", - Operation::Replace => "replace", - Operation::Delete => "delete", - }; - snapshots.push(TableSnapshot { - timestamp: snapshot - .timestamp() - .context(model_error::InvalidIcebergSnapshotTimestampSnafu)?, - operation: operation.to_string(), - total_records: snapshot - .summary() - .other - .get("total-records") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0), - added_records: snapshot - .summary() - .other - .get("added-records") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0), - deleted_records: snapshot - .summary() - .other - .get("deleted-records") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0), - snapshot_id: snapshot.snapshot_id(), - }); - } - snapshots.sort_by(|a, b| b.timestamp.cmp(&a.timestamp)); - - Ok(Self { snapshots }) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct TableSettingsResponse { - snapshots_management: SnapshotsManagement, - automatic_compaction: AutomaticCompaction, - lifecycle_policies: LifecyclePolicies, - user_managed: UserManaged, -} - -impl TryFrom
for TableSettingsResponse { - type Error = NexusError; - - #[allow(clippy::match_same_arms)] - fn try_from(table: Table) -> NexusResult { - let mut response = Self::default(); - for (id, value) in &table.metadata.0.properties { - match id.as_str() { - "history.expire.max-snapshot-age-ms" => { - response.snapshots_management.max_snapshot_age_ms = value.parse().ok(); - } - "history.expire.min-snapshots-to-keep" => { - response.snapshots_management.min_snapshots_to_keep = value.parse().ok(); - } - "history.expire.max-ref-age-ms" => { - response.snapshots_management.max_ref_age_ms = value.parse().ok(); - } - "compaction.enabled" => { - response.automatic_compaction.enabled = value.parse().ok(); - } - "lifecycle.enabled" => { - response.lifecycle_policies.enabled = value.parse().ok(); - } - "lifecycle.max-data-age-ms" => { - response.lifecycle_policies.max_data_age_ms = value.parse().ok(); - } - "lifecycle.data-age-column" => { - response.lifecycle_policies.data_age_column = Some(value.clone()); - } - _ => { - if id.starts_with("user_managed.") { - response.user_managed.items.push(Property { - id: id.clone(), - value: value.clone(), - }); - } - #[allow(clippy::unwrap_used)] - if id.starts_with("lifecycle.columns.") && id.ends_with(".max-data-age-ms") { - let column_name = id - .strip_prefix("lifecycle.columns.") - .unwrap() - .split('.') - .next() - .unwrap() - .to_string(); - let transform = match table - .metadata - .0 - .properties - .get(&format!("lifecycle.columns.{column_name}.transform")) - .unwrap() - .as_str() - { - "nullify" => ColumnTransform::Nullify, - _ => ColumnTransform::Nullify, - }; - let column_policy = ColumnLevelPolicy { - column_name, - max_data_age_ms: value.parse().unwrap(), - transform, - }; - response - .lifecycle_policies - .columns_max_data_age_ms - .get_or_insert_with(Vec::new) - .push(column_policy); - } - } - } - } - Ok(response) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct TableUpdatePropertiesPayload { - pub properties: Properties, -} - -impl TableUpdatePropertiesPayload { - pub(crate) fn to_commit(&self, table: &Table, ident: &TableIdent) -> TableCommit { - match &self.properties { - Properties::SnapshotsManagement(p) => p.to_commit(ident), - Properties::AutomaticCompaction(p) => p.to_commit(ident), - Properties::LifecyclePolicies(p) => p.to_commit(ident, &table.metadata.0.properties), - Properties::UserManaged(p) => p.to_commit(table, ident), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase", tag = "type")] -pub enum Properties { - SnapshotsManagement(SnapshotsManagement), - AutomaticCompaction(AutomaticCompaction), - LifecyclePolicies(LifecyclePolicies), - UserManaged(UserManaged), -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct SnapshotsManagement { - /// A positive number for the minimum number of snapshots to keep in a branch while expiring snapshots. - /// Defaults to table property history.expire.min-snapshots-to-keep. - #[serde(skip_serializing_if = "Option::is_none")] - min_snapshots_to_keep: Option, - /// A positive number for the max age of snapshots to keep when expiring, including the latest snapshot. - /// Defaults to table property history.expire.max-snapshot-age-ms. 
- #[serde(skip_serializing_if = "Option::is_none")] - max_snapshot_age_ms: Option, - /// For snapshot references except the main branch, a positive number for the max age of the snapshot reference to keep while expiring snapshots. - /// Defaults to table property history.expire.max-ref-age-ms. The main branch never expires. - #[serde(skip_serializing_if = "Option::is_none")] - max_ref_age_ms: Option, -} - -impl SnapshotsManagement { - #[must_use] - pub fn to_commit(&self, ident: &TableIdent) -> TableCommit { - let mut properties = HashMap::new(); - let mut properties_to_remove = vec![]; - - if let Some(min_snapshots_to_keep) = self.min_snapshots_to_keep { - properties.insert( - "history.expire.min-snapshots-to-keep".to_string(), - min_snapshots_to_keep.to_string(), - ); - } else { - properties_to_remove.push("history.expire.min-snapshots-to-keep".to_string()); - } - if let Some(max_snapshot_age_ms) = self.max_snapshot_age_ms { - properties.insert( - "history.expire.max-snapshot-age-ms".to_string(), - max_snapshot_age_ms.to_string(), - ); - } else { - properties_to_remove.push("history.expire.max-snapshot-age-ms".to_string()); - } - if let Some(max_ref_age_ms) = self.max_ref_age_ms { - properties.insert( - "history.expire.max-ref-age-ms".to_string(), - max_ref_age_ms.to_string(), - ); - } else { - properties_to_remove.push("history.expire.max-ref-age-ms".to_string()); - } - TableCommit { - ident: ident.clone(), - requirements: vec![], - updates: vec![ - TableUpdate::RemoveProperties { - removals: properties_to_remove, - }, - TableUpdate::SetProperties { - updates: properties, - }, - ], - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct AutomaticCompaction { - enabled: Option, -} - -impl AutomaticCompaction { - fn to_commit(&self, ident: &TableIdent) -> TableCommit { - let mut properties = HashMap::new(); - let mut properties_to_remove = vec![]; - if let Some(enabled) = self.enabled { - properties.insert("compaction.enabled".to_string(), enabled.to_string()); - } else { - properties_to_remove.push("compaction.enabled".to_string()); - } - TableCommit { - ident: ident.clone(), - requirements: vec![], - updates: vec![ - TableUpdate::RemoveProperties { - removals: properties_to_remove, - }, - TableUpdate::SetProperties { - updates: properties, - }, - ], - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LifecyclePolicies { - enabled: Option, - // This column indicates when a given record originated - data_age_column: Option, - // Row level policy - max_data_age_ms: Option, - // Column level policy - columns_max_data_age_ms: Option>, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ColumnLevelPolicy { - column_name: String, - max_data_age_ms: i64, - transform: ColumnTransform, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)] -pub enum ColumnTransform { - Nullify, -} - -impl LifecyclePolicies { - fn to_commit(&self, ident: &TableIdent, properties: &HashMap) -> TableCommit { - let mut properties_to_add = HashMap::new(); - let mut properties_to_remove = vec![]; - - for k in properties.keys() { - if k.starts_with("lifecycle.") { - properties_to_remove.push(k.clone()); - } - } - - if let Some(enabled) = self.enabled { - properties_to_add.insert("lifecycle.enabled".to_string(), 
enabled.to_string()); - if let Some(data_age_column) = &self.data_age_column { - properties_to_add.insert( - "lifecycle.data-age-column".to_string(), - data_age_column.clone(), - ); - } - - if let Some(max_data_age_ms) = self.max_data_age_ms { - properties_to_add.insert( - "lifecycle.max-data-age-ms".to_string(), - max_data_age_ms.to_string(), - ); - } - - if let Some(columns_max_data_age_ms) = &self.columns_max_data_age_ms { - for column_policy in columns_max_data_age_ms { - properties_to_add.insert( - format!( - "lifecycle.columns.{}.max-data-age-ms", - column_policy.column_name - ), - column_policy.max_data_age_ms.to_string(), - ); - properties_to_add.insert( - format!("lifecycle.columns.{}.transform", column_policy.column_name), - match column_policy.transform { - ColumnTransform::Nullify => "nullify".to_string(), - }, - ); - } - } - } - TableCommit { - ident: ident.clone(), - requirements: vec![], - updates: vec![ - TableUpdate::RemoveProperties { - removals: properties_to_remove, - }, - TableUpdate::SetProperties { - updates: properties_to_add, - }, - ], - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -pub struct UserManaged { - items: Vec, -} - -impl UserManaged { - fn to_commit(&self, table: &Table, ident: &TableIdent) -> TableCommit { - // Find the properties that are managed by the user with prefix user_managed from the table - let user_managed_prefix = "user_managed."; - - let properties_to_remove = table - .metadata - .0 - .properties - .iter() - .filter(|(k, _)| k.starts_with(user_managed_prefix)) - .map(|(k, _)| k.clone()) - .collect(); - - let properties = self - .items - .iter() - .map(|p| { - let id = if p.id.starts_with(user_managed_prefix) { - p.id.clone() - } else { - format!("{}{}", user_managed_prefix, p.id) - }; - (id, p.value.clone()) - }) - .collect(); - - TableCommit { - ident: ident.clone(), - requirements: vec![], - updates: vec![ - TableUpdate::RemoveProperties { - removals: properties_to_remove, - }, - TableUpdate::SetProperties { - updates: properties, - }, - ], - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -pub struct Property { - id: String, - value: String, -} diff --git a/crates/runtime/src/http/ui/old_models/storage_profile.rs b/crates/runtime/src/http/ui/old_models/storage_profile.rs deleted file mode 100644 index 90d09b6d9..000000000 --- a/crates/runtime/src/http/ui/old_models/storage_profile.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
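An illustrative aside, not part of the patch: the removed settings models above all build a table commit the same way, turning `Some` fields into property updates and `None` fields into property removals. A minimal dependency-free sketch of that split; `SnapshotSettings` and `to_property_changes` are stand-in names for the real `SnapshotsManagement::to_commit`.

```rust
use std::collections::HashMap;

struct SnapshotSettings {
    min_snapshots_to_keep: Option<i32>,
    max_snapshot_age_ms: Option<i64>,
}

// Some(value) -> set the property; None -> schedule it for removal.
fn to_property_changes(s: &SnapshotSettings) -> (HashMap<String, String>, Vec<String>) {
    let mut set = HashMap::new();
    let mut remove = Vec::new();
    match s.min_snapshots_to_keep {
        Some(v) => {
            set.insert("history.expire.min-snapshots-to-keep".into(), v.to_string());
        }
        None => remove.push("history.expire.min-snapshots-to-keep".into()),
    }
    match s.max_snapshot_age_ms {
        Some(v) => {
            set.insert("history.expire.max-snapshot-age-ms".into(), v.to_string());
        }
        None => remove.push("history.expire.max-snapshot-age-ms".into()),
    }
    (set, remove)
}

fn main() {
    let (set, remove) = to_property_changes(&SnapshotSettings {
        min_snapshots_to_keep: Some(5),
        max_snapshot_age_ms: None,
    });
    assert_eq!(set["history.expire.min-snapshots-to-keep"], "5");
    assert_eq!(remove, vec!["history.expire.max-snapshot-age-ms".to_string()]);
    println!("ok");
}
```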
- -use crate::http::ui::models::aws::{CloudProvider, Credentials}; -use chrono::{DateTime, Utc}; -use control_plane::models; -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; -use validator::Validate; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CreateStorageProfilePayload { - #[serde(rename = "type")] - pub r#type: CloudProvider, - #[validate(length(min = 1))] - pub region: Option, - #[validate(length(min = 6, max = 63))] - pub bucket: Option, - pub credentials: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub sts_role_arn: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub endpoint: Option, -} - -impl From for models::StorageProfileCreateRequest { - fn from(payload: CreateStorageProfilePayload) -> Self { - Self { - r#type: payload.r#type.into(), - region: payload.region, - bucket: payload.bucket, - credentials: payload.credentials.map(std::convert::Into::into), - sts_role_arn: payload.sts_role_arn, - endpoint: payload.endpoint, - validate_credentials: Option::from(false), - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct StorageProfile { - #[serde(rename = "type")] - pub r#type: CloudProvider, - #[validate(length(min = 1))] - pub region: Option, - #[validate(length(min = 6, max = 63))] - pub bucket: Option, - pub credentials: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub sts_role_arn: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub endpoint: Option, - pub id: uuid::Uuid, - pub created_at: DateTime, - pub updated_at: DateTime, -} - -impl From for StorageProfile { - fn from(profile: models::StorageProfile) -> Self { - Self { - r#type: profile.r#type.into(), - region: profile.region, - bucket: profile.bucket, - credentials: profile.credentials.map(std::convert::Into::into), - sts_role_arn: profile.sts_role_arn, - endpoint: profile.endpoint, - id: profile.id, - created_at: DateTime::from_naive_utc_and_offset(profile.created_at, Utc), - updated_at: DateTime::from_naive_utc_and_offset(profile.updated_at, Utc), - } - } -} diff --git a/crates/runtime/src/http/ui/old_models/table.rs b/crates/runtime/src/http/ui/old_models/table.rs deleted file mode 100644 index 6607097db..000000000 --- a/crates/runtime/src/http/ui/old_models/table.rs +++ /dev/null @@ -1,408 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -use crate::execution::query::IceBucketQueryContext; -use crate::http::ui::models::database::CompactionSummary; -use crate::http::ui::models::storage_profile::StorageProfile; -use catalog::models as CatalogModels; -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use utoipa::openapi::SchemaFormat::KnownFormat; -use utoipa::openapi::{ObjectBuilder, Ref, RefOr, Type}; -use utoipa::{PartialSchema, ToSchema}; -use uuid::Uuid; -use validator::Validate; - -#[must_use] -pub fn get_table_id(ident: &CatalogModels::TableIdent) -> Uuid { - Uuid::new_v5(&Uuid::NAMESPACE_DNS, ident.table.as_bytes()) -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TableMetadataWrapper(pub TableMetadata); - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UnboundPartitionSpecWrapper(pub(crate) UnboundPartitionSpec); - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct SortOrderWrapper(pub(crate) SortOrder); - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct TableRegisterRequest { - pub name: String, - pub metadata_location: String, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Validate)] -#[serde(rename_all = "camelCase")] -pub struct TableCreatePayload { - pub name: String, - pub location: Option, - pub schema: SchemaWrapper, - pub partition_spec: Option, - pub sort_order: Option, - pub stage_create: Option, - pub properties: Option>, -} - -impl From for catalog::models::TableCreation { - fn from(payload: TableCreatePayload) -> Self { - Self { - name: payload.name, - location: payload.location, - schema: payload.schema.0, - partition_spec: payload.partition_spec.map(|x| x.0), - sort_order: payload.sort_order.map(|x| x.0), - properties: payload.properties.unwrap_or_default(), - } - } -} - -impl ToSchema for TableCreatePayload { - fn name() -> std::borrow::Cow<'static, str> { - std::borrow::Cow::Borrowed("TableCreatePayload") - } -} - -impl PartialSchema for TableCreatePayload { - fn schema() -> RefOr { - RefOr::from(utoipa::openapi::Schema::Object( - ObjectBuilder::new() - .property("name", String::schema()) - .property("location", Option::::schema()) - .property("schema", Ref::new("#/components/schemas/Schema")) - .property( - "partition_spec", - Ref::new("#/components/schemas/PartitionSpec"), - ) - .property("sort_order", Ref::new("#/components/schemas/SortOrder")) - .property("stage_create", Option::::schema()) - .property("properties", Option::>::schema()) - .required("name") - .required("schema") - .build(), - )) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Table { - pub id: Uuid, - pub name: String, - pub storage_profile: StorageProfile, - pub database_name: String, - pub warehouse_id: Uuid, - pub properties: HashMap, - pub metadata: TableMetadataWrapper, - pub metadata_location: String, - pub statistics: Statistics, - #[serde(skip_serializing_if = "Option::is_none")] - pub compaction_summary: Option, - pub created_at: DateTime, - pub updated_at: DateTime, -} - -impl Table { - pub fn with_details( - &mut self, - warehouse_id: Uuid, - profile: StorageProfile, - database_name: String, - ) { - self.storage_profile = profile; - self.warehouse_id = warehouse_id; - self.database_name = database_name; - 
self.properties = self.properties.clone(); - if let Some(created_at) = self.properties.get("created_at") { - if let Ok(created_at) = DateTime::parse_from_rfc3339(created_at) { - self.created_at = DateTime::from(created_at); - } - } - if let Some(updated_at) = self.properties.get("updated_at") { - if let Ok(updated_at) = DateTime::parse_from_rfc3339(updated_at) { - self.updated_at = DateTime::from(updated_at); - } - } - } -} - -impl From for Table { - fn from(table: catalog::models::Table) -> Self { - Self { - id: get_table_id(&table.ident), - name: table.ident.table, - storage_profile: StorageProfile::default(), - database_name: String::default(), - warehouse_id: Uuid::default(), - properties: table.properties, - metadata: TableMetadataWrapper(table.metadata.clone()), - metadata_location: table.metadata_location, - created_at: DateTime::default(), - updated_at: DateTime::default(), - statistics: Statistics::from_table_metadata(&table.metadata), - compaction_summary: None, - } - } -} - -impl ToSchema for Table { - fn name() -> std::borrow::Cow<'static, str> { - std::borrow::Cow::Borrowed("Table") - } -} -impl PartialSchema for Table { - fn schema() -> RefOr { - RefOr::from(utoipa::openapi::Schema::Object( - ObjectBuilder::new() - .property( - "id", - ObjectBuilder::new() - .schema_type(Type::String) - .format(Some(KnownFormat(utoipa::openapi::KnownFormat::Uuid))), - ) - .property("name", String::schema()) - .property( - "storageProfile", - Ref::new("#/components/schemas/StorageProfile"), - ) - .property("databaseName", String::schema()) - .property( - "warehouseId", - ObjectBuilder::new() - .schema_type(Type::String) - .format(Some(KnownFormat(utoipa::openapi::KnownFormat::Uuid))), - ) - .property("properties", HashMap::::schema()) - .property("metadata", Ref::new("#/components/schemas/TableMetadata")) - .property("metadataLocation", String::schema()) - .property("statistics", Ref::new("#/components/schemas/Statistics")) - .property( - "compactionSummary", - Ref::new("#/components/schemas/CompactionSummary"), - ) - .property( - "createdAt", - ObjectBuilder::new() - .schema_type(Type::String) - .format(Some(KnownFormat(utoipa::openapi::KnownFormat::DateTime))), - ) - .property( - "updatedAt", - ObjectBuilder::new() - .schema_type(Type::String) - .format(Some(KnownFormat(utoipa::openapi::KnownFormat::DateTime))), - ) - .required("name") - .build(), - )) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct Statistics { - pub commit_count: i32, - pub total_bytes: i64, - pub total_rows: i64, - pub total_files: i32, - pub total_snapshots_files: i32, - pub op_append_count: i64, - pub op_overwrite_count: i64, - pub op_delete_count: i64, - pub op_replace_count: i64, - pub bytes_added: i64, - pub bytes_removed: i64, - pub rows_added: i64, - pub rows_deleted: i64, - #[serde(skip_serializing_if = "Option::is_none")] - pub table_count: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub database_count: Option, -} - -impl Statistics { - #[must_use] - pub fn from_table_metadata(metadata: &TableMetadata) -> Self { - let mut commit_count = 0; - let mut total_bytes = 0; - let mut total_rows = 0; - let mut total_files = 0; - let mut total_snapshots_files = 0; - let mut rows_deleted = 0; - let mut bytes_removed = 0; - let mut rows_added = 0; - let mut bytes_added = 0; - let mut op_append_count = 0; - let mut op_overwrite_count = 0; - let mut op_delete_count = 0; - let mut op_replace_count = 0; - - - 
if let Some(latest_snapshot) = metadata.current_snapshot() { - total_bytes = latest_snapshot - .summary() - .other - .get("total-files-size") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - total_rows = latest_snapshot - .summary() - .other - .get("total-records") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - total_files = latest_snapshot - .summary() - .other - .get("total-data-files") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - }; - - metadata.snapshots().for_each(|snapshot| { - let summary = snapshot.summary(); - commit_count += 1; - bytes_added += summary - .other - .get("added-files-size") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - rows_added += summary - .other - .get("added-records") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - bytes_removed += summary - .other - .get("removed-files-size") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - rows_deleted += summary - .other - .get("deleted-records") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - total_snapshots_files += summary - .other - .get("total-data-files") - .and_then(|value| value.parse::().ok()) - .unwrap_or(0); - - match summary.operation { - iceberg::spec::Operation::Append => op_append_count += 1, - iceberg::spec::Operation::Overwrite => op_overwrite_count += 1, - iceberg::spec::Operation::Delete => op_delete_count += 1, - iceberg::spec::Operation::Replace => op_replace_count += 1, - } - }); - - Self { - commit_count, - op_append_count, - op_overwrite_count, - op_delete_count, - op_replace_count, - total_bytes, - total_files, - total_snapshots_files, - bytes_added, - bytes_removed, - total_rows, - rows_added, - rows_deleted, - table_count: Option::from(1), - database_count: None, - } - } - - #[must_use] - pub fn aggregate(&self, other: &Self) -> Self { - Self { - commit_count: self.commit_count + other.commit_count, - op_append_count: self.op_append_count + other.op_append_count, - op_overwrite_count: self.op_overwrite_count + other.op_overwrite_count, - op_delete_count: self.op_delete_count + other.op_delete_count, - op_replace_count: self.op_replace_count + other.op_replace_count, - total_bytes: self.total_bytes + other.total_bytes, - bytes_added: self.bytes_added + other.bytes_added, - bytes_removed: self.bytes_removed + other.bytes_removed, - total_rows: self.total_rows + other.total_rows, - total_files: self.total_files + other.total_files, - total_snapshots_files: self.total_snapshots_files + other.total_snapshots_files, - rows_added: self.rows_added + other.rows_added, - rows_deleted: self.rows_deleted + other.rows_deleted, - table_count: self.table_count.map_or(other.table_count, |count| { - Some(count + other.table_count.unwrap_or(0)) - }), - database_count: self.database_count.map_or(other.database_count, |count| { - Some(count + other.database_count.unwrap_or(0)) - }), - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct QueryCreatePayload { - pub query: String, - pub context: Option>, -} - -impl QueryCreatePayload { - #[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(query: String) -> Self { - Self { query, context: None } - } -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct QueryResponse { - pub query: String, - pub result: String, - pub duration_seconds: f32, -} - -impl QueryResponse { - 
#[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(query: String, result: String, duration_seconds: f32) -> Self { - Self { - query, - result, - duration_seconds, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct SchemaWrapper(Schema); - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct TableUploadPayload { - #[schema(format = "binary")] - pub upload_file: String, -} diff --git a/crates/runtime/src/http/ui/old_models/warehouse.rs b/crates/runtime/src/http/ui/old_models/warehouse.rs deleted file mode 100644 index 5a5f59442..000000000 --- a/crates/runtime/src/http/ui/old_models/warehouse.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#![allow(unused_qualifications)] - -use crate::http::ui::models::database::{CompactionSummary, Database}; -use crate::http::ui::models::storage_profile::StorageProfile; -use crate::http::ui::models::table::Statistics; -use chrono::{DateTime, Utc}; -use control_plane::models; -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; -use validator::Validate; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct Navigation { - pub warehouses: Vec, -} - -impl Navigation { - #[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(warehouses: Vec) -> Self { - Self { warehouses } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CreateWarehousePayload { - #[validate(length(min = 1))] - pub name: String, - pub storage_profile_id: uuid::Uuid, - #[validate(length(min = 1))] - pub key_prefix: String, -} - -impl CreateWarehousePayload { - #[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(name: String, storage_profile_id: uuid::Uuid, key_prefix: String) -> Self { - Self { - name, - storage_profile_id, - key_prefix, - } - } -} - -impl From for models::WarehouseCreateRequest { - fn from(payload: CreateWarehousePayload) -> Self { - Self { - prefix: payload.key_prefix, - name: payload.name, - storage_profile_id: payload.storage_profile_id, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct Warehouse { - pub id: uuid::Uuid, - #[validate(length(min = 1))] - pub name: String, - pub databases: Vec, - pub storage_profile_id: uuid::Uuid, - #[validate(length(min = 1))] - pub key_prefix: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub external_id: Option, - #[serde(skip_serializing_if = "Option::is_none")] - 
pub location: Option, - pub created_at: chrono::DateTime, - pub updated_at: chrono::DateTime, - pub storage_profile: StorageProfile, - pub statistics: Statistics, - #[serde(skip_serializing_if = "Option::is_none")] - pub compaction_summary: Option, -} - -impl From for Warehouse { - fn from(warehouse: control_plane::models::Warehouse) -> Self { - Self { - id: warehouse.id, - key_prefix: warehouse.prefix, - name: warehouse.name, - location: Option::from(warehouse.location), - storage_profile_id: warehouse.storage_profile_id, - created_at: DateTime::from_naive_utc_and_offset(warehouse.created_at, Utc), - updated_at: DateTime::from_naive_utc_and_offset(warehouse.updated_at, Utc), - storage_profile: StorageProfile::default(), - statistics: Statistics::default(), - external_id: Option::default(), - compaction_summary: None, - databases: vec![], - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Validate, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct WarehousesDashboard { - pub warehouses: Vec, - pub statistics: Statistics, - #[serde(skip_serializing_if = "Option::is_none")] - pub compaction_summary: Option, -} - -impl WarehousesDashboard { - #[allow(clippy::new_without_default)] - #[must_use] - pub const fn new(warehouses: Vec, statistics: Statistics) -> Self { - Self { - warehouses, - statistics, - compaction_summary: None, - } - } -} diff --git a/crates/runtime/src/http/ui/queries/error.rs b/crates/runtime/src/http/ui/queries/error.rs index a0a462211..1e977753a 100644 --- a/crates/runtime/src/http/ui/queries/error.rs +++ b/crates/runtime/src/http/ui/queries/error.rs @@ -19,8 +19,8 @@ use crate::http::error::ErrorResponse; use crate::http::ui::error::IntoStatusCode; use axum::response::IntoResponse; use axum::Json; +use embucket_history::store::WorksheetsStoreError; use http::status::StatusCode; -use icebucket_history::store::WorksheetsStoreError; use snafu::prelude::*; pub type QueriesResult = Result; diff --git a/crates/runtime/src/http/ui/queries/handlers.rs b/crates/runtime/src/http/ui/queries/handlers.rs index 0f8d80c9e..ba117e805 100644 --- a/crates/runtime/src/http/ui/queries/handlers.rs +++ b/crates/runtime/src/http/ui/queries/handlers.rs @@ -29,8 +29,8 @@ use axum::{ extract::{Query, State}, Json, }; -use icebucket_history::{QueryRecord as QueryRecordItem, QueryRecordId, WorksheetId}; -use icebucket_utils::iterable::IterableEntity; +use embucket_history::{QueryRecord as QueryRecordItem, QueryRecordId, WorksheetId}; +use embucket_utils::iterable::IterableEntity; use std::collections::HashMap; use utoipa::OpenApi; diff --git a/crates/runtime/src/http/ui/queries/models.rs b/crates/runtime/src/http/ui/queries/models.rs index ac7bf4a4f..a351feffd 100644 --- a/crates/runtime/src/http/ui/queries/models.rs +++ b/crates/runtime/src/http/ui/queries/models.rs @@ -22,7 +22,7 @@ use crate::execution::models::ColumnInfo; use arrow::array::RecordBatch; use arrow_json::{writer::JsonArray, WriterBuilder}; use chrono::{DateTime, Utc}; -use icebucket_history::{QueryRecord as QueryRecordItem, QueryRecordId, QueryStatus, WorksheetId}; +use embucket_history::{QueryRecord as QueryRecordItem, QueryRecordId, QueryStatus, WorksheetId}; use indexmap::IndexMap; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -30,7 +30,7 @@ use snafu::ResultExt; use std::collections::HashMap; use utoipa::ToSchema; -pub type ExecutionContext = crate::execution::query::IceBucketQueryContext; +pub type ExecutionContext = crate::execution::query::QueryContext; 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)] #[serde(rename_all = "camelCase")] diff --git a/crates/runtime/src/http/ui/schemas/error.rs b/crates/runtime/src/http/ui/schemas/error.rs index 1e6bbfdaa..d0c2b4127 100644 --- a/crates/runtime/src/http/ui/schemas/error.rs +++ b/crates/runtime/src/http/ui/schemas/error.rs @@ -19,8 +19,8 @@ use crate::http::error::ErrorResponse; use crate::http::ui::error::IntoStatusCode; use axum::response::IntoResponse; use axum::Json; +use embucket_metastore::error::MetastoreError; use http::StatusCode; -use icebucket_metastore::error::MetastoreError; use snafu::prelude::*; pub type SchemasResult = Result; diff --git a/crates/runtime/src/http/ui/schemas/handlers.rs b/crates/runtime/src/http/ui/schemas/handlers.rs index 8375edb21..ecfa183ea 100644 --- a/crates/runtime/src/http/ui/schemas/handlers.rs +++ b/crates/runtime/src/http/ui/schemas/handlers.rs @@ -30,10 +30,10 @@ use axum::{ extract::{Path, Query, State}, Json, }; -use icebucket_metastore::error::MetastoreError; -use icebucket_metastore::models::IceBucketSchemaIdent; -use icebucket_metastore::IceBucketSchema; -use icebucket_utils::list_config::ListConfig; +use embucket_metastore::error::MetastoreError; +use embucket_metastore::models::SchemaIdent as MetastoreSchemaIdent; +use embucket_metastore::Schema as MetastoreSchema; +use embucket_utils::list_config::ListConfig; use std::collections::HashMap; use std::convert::From; use std::convert::Into; @@ -85,8 +85,8 @@ pub async fn create_schema( Path(database_name): Path, Json(payload): Json, ) -> SchemasResult> { - let ident = IceBucketSchemaIdent::new(database_name, payload.name); - let schema = IceBucketSchema { + let ident = MetastoreSchemaIdent::new(database_name, payload.name); + let schema = MetastoreSchema { ident, properties: Some(HashMap::new()), }; @@ -123,7 +123,7 @@ pub async fn delete_schema( Query(query): Query, Path((database_name, schema_name)): Path<(String, String)>, ) -> SchemasResult<()> { - let ident = IceBucketSchemaIdent::new(database_name, schema_name); + let ident = MetastoreSchemaIdent::new(database_name, schema_name); state .metastore .delete_schema(&ident, query.cascade.unwrap_or_default()) @@ -151,7 +151,7 @@ pub async fn get_schema( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, ) -> SchemasResult> { - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: database_name.clone(), schema: schema_name.clone(), }; @@ -191,7 +191,7 @@ pub async fn update_schema( Path((database_name, schema_name)): Path<(String, String)>, Json(schema): Json, ) -> SchemasResult> { - let schema_ident = IceBucketSchemaIdent::new(database_name, schema_name); + let schema_ident = MetastoreSchemaIdent::new(database_name, schema_name); // TODO: Implement schema renames state .metastore diff --git a/crates/runtime/src/http/ui/schemas/models.rs b/crates/runtime/src/http/ui/schemas/models.rs index 5cc1f1f54..41ea77d22 100644 --- a/crates/runtime/src/http/ui/schemas/models.rs +++ b/crates/runtime/src/http/ui/schemas/models.rs @@ -16,8 +16,8 @@ // under the License. 
use chrono::NaiveDateTime; -use icebucket_metastore::models::{IceBucketSchema, IceBucketSchemaIdent}; -use icebucket_metastore::RwObject; +use embucket_metastore::models::{Schema as MetastoreSchema, SchemaIdent as MetastoreSchemaIdent}; +use embucket_metastore::RwObject; use serde::{Deserialize, Serialize}; use std::convert::From; use utoipa::{IntoParams, ToSchema}; @@ -30,8 +30,8 @@ pub struct Schema { pub updated_at: NaiveDateTime, } -impl From> for Schema { - fn from(rw_schema: RwObject) -> Self { +impl From> for Schema { + fn from(rw_schema: RwObject) -> Self { Self { name: rw_schema.data.ident.schema, database: rw_schema.data.ident.database, @@ -43,10 +43,10 @@ impl From> for Schema { // TODO: Remove it when found why it can't locate .into() if only From trait implemeted #[allow(clippy::from_over_into)] -impl Into for Schema { - fn into(self) -> IceBucketSchema { - IceBucketSchema { - ident: IceBucketSchemaIdent { +impl Into for Schema { + fn into(self) -> MetastoreSchema { + MetastoreSchema { + ident: MetastoreSchemaIdent { schema: self.name, database: self.database, }, diff --git a/crates/runtime/src/http/ui/tables/error.rs b/crates/runtime/src/http/ui/tables/error.rs index a6f3bb1d1..41086b4af 100644 --- a/crates/runtime/src/http/ui/tables/error.rs +++ b/crates/runtime/src/http/ui/tables/error.rs @@ -21,8 +21,8 @@ use crate::http::ui::error::IntoStatusCode; use axum::extract::multipart; use axum::response::IntoResponse; use axum::Json; +use embucket_metastore::error::MetastoreError; use http::StatusCode; -use icebucket_metastore::error::MetastoreError; use snafu::prelude::*; pub type TablesResult = Result; diff --git a/crates/runtime/src/http/ui/tables/handlers.rs b/crates/runtime/src/http/ui/tables/handlers.rs index 7800d0bce..c4cc654ef 100644 --- a/crates/runtime/src/http/ui/tables/handlers.rs +++ b/crates/runtime/src/http/ui/tables/handlers.rs @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-use crate::execution::query::IceBucketQueryContext; +use crate::execution::query::QueryContext; use crate::http::error::ErrorResponse; use crate::http::session::DFSessionId; use crate::http::state::AppState; @@ -36,9 +36,9 @@ use axum::{ Json, }; use datafusion::arrow::csv::reader::Format; -use icebucket_metastore::error::MetastoreError; -use icebucket_metastore::{IceBucketSchemaIdent, IceBucketTableIdent}; -use icebucket_utils::list_config::ListConfig; +use embucket_metastore::error::MetastoreError; +use embucket_metastore::{SchemaIdent as MetastoreSchemaIdent, TableIdent as MetastoreTableIdent}; +use embucket_utils::list_config::ListConfig; use snafu::ResultExt; use std::time::Instant; use utoipa::OpenApi; @@ -94,7 +94,7 @@ pub async fn get_table_statistics( State(state): State, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, ) -> TablesResult> { - let ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); + let ident = MetastoreTableIdent::new(&database_name, &schema_name, &table_name); match state.metastore.get_table(&ident).await { Ok(Some(rw_object)) => { let mut total_bytes = 0; @@ -156,7 +156,7 @@ pub async fn get_table_columns_info( State(state): State, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, ) -> TablesResult> { - let context = IceBucketQueryContext { + let context = QueryContext { database: Some(database_name.clone()), schema: Some(schema_name.clone()), }; @@ -212,11 +212,11 @@ pub async fn get_table_preview_data( State(state): State, Path((database_name, schema_name, table_name)): Path<(String, String, String)>, ) -> TablesResult> { - let context = IceBucketQueryContext { + let context = QueryContext { database: Some(database_name.clone()), schema: Some(schema_name.clone()), }; - let ident = IceBucketTableIdent::new(&database_name, &schema_name, &table_name); + let ident = MetastoreTableIdent::new(&database_name, &schema_name, &table_name); let column_names = match state.metastore.get_table(&ident).await { Ok(Some(rw_object)) => { if let Ok(schema) = rw_object.metadata.current_schema(None) { @@ -338,7 +338,7 @@ pub async fn upload_file( .execution_svc .upload_data_to_table( &session_id, - &IceBucketTableIdent { + &MetastoreTableIdent { table: table_name.clone(), schema: schema_name.clone(), database: database_name.clone(), @@ -392,7 +392,7 @@ pub async fn get_tables( State(state): State, Path((database_name, schema_name)): Path<(String, String)>, ) -> TablesResult> { - let ident = IceBucketSchemaIdent::new(database_name, schema_name); + let ident = MetastoreSchemaIdent::new(database_name, schema_name); state .metastore .list_tables( diff --git a/crates/runtime/src/http/ui/tests/databases.rs b/crates/runtime/src/http/ui/tests/databases.rs index dfe26774d..42392c457 100644 --- a/crates/runtime/src/http/ui/tests/databases.rs +++ b/crates/runtime/src/http/ui/tests/databases.rs @@ -23,10 +23,10 @@ use crate::http::ui::databases::models::{ }; use crate::http::ui::tests::common::{req, ui_test_op, Entity, Op}; use crate::http::ui::volumes::models::{Volume, VolumeCreatePayload, VolumeCreateResponse}; -use crate::tests::run_icebucket_test_server; +use crate::tests::run_test_server; +use embucket_metastore::VolumeType as MetastoreVolumeType; +use embucket_metastore::{Database as MetastoreDatabase, Volume as MetastoreVolume}; use http::Method; -use icebucket_metastore::IceBucketVolumeType; -use icebucket_metastore::{IceBucketDatabase, IceBucketVolume}; #[tokio::test] 
#[allow(clippy::too_many_lines)] @@ -34,7 +34,7 @@ use icebucket_metastore::{IceBucketDatabase, IceBucketVolume}; expected = "Failed to get error response: reqwest::Error { kind: Decode, source: Error(\"missing field `message`\", line: 1, column: 32) }" )] async fn test_ui_databases_metastore_update_bug() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; // Create volume with empty name let res = ui_test_op( @@ -42,9 +42,9 @@ async fn test_ui_databases_metastore_update_bug() { Op::Create, None, &Entity::Volume(VolumeCreatePayload { - data: Volume::from(IceBucketVolume { + data: Volume::from(MetastoreVolume { ident: String::from("t"), - volume: IceBucketVolumeType::Memory, + volume: MetastoreVolumeType::Memory, }), }), ) @@ -56,7 +56,7 @@ async fn test_ui_databases_metastore_update_bug() { // Create database, Ok let expected = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test".to_string(), properties: None, volume: volume.data.name.clone(), @@ -73,7 +73,7 @@ async fn test_ui_databases_metastore_update_bug() { // Update database test -> new-test, Ok let new_database = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "new-test".to_string(), properties: None, volume: volume.data.name.clone(), @@ -135,7 +135,7 @@ async fn test_ui_databases_metastore_update_bug() { #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_databases() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; let client = reqwest::Client::new(); // Create volume with empty name @@ -144,9 +144,9 @@ async fn test_ui_databases() { Op::Create, None, &Entity::Volume(VolumeCreatePayload { - data: Volume::from(IceBucketVolume { + data: Volume::from(MetastoreVolume { ident: String::new(), - volume: IceBucketVolumeType::Memory, + volume: MetastoreVolumeType::Memory, }), }), ) @@ -155,7 +155,7 @@ async fn test_ui_databases() { // Create database with empty name, error 400 let expected = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: String::new(), properties: None, volume: volume.data.name.clone(), @@ -177,7 +177,7 @@ async fn test_ui_databases() { // Create database, Ok let expected1 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test".to_string(), properties: None, volume: volume.data.name.clone(), @@ -190,7 +190,7 @@ async fn test_ui_databases() { assert_eq!(expected1.data, created_database.data); let expected2 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test2".to_string(), properties: None, volume: volume.data.name.clone(), @@ -198,7 +198,7 @@ async fn test_ui_databases() { .into(), }; let expected3 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test3".to_string(), properties: None, volume: volume.data.name.clone(), @@ -206,7 +206,7 @@ async fn test_ui_databases() { .into(), }; let expected4 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test4".to_string(), properties: None, volume: volume.data.name.clone(), @@ -281,7 +281,7 @@ async fn test_ui_databases() { // Create database with another name, Ok let expected_another = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "name".to_string(), properties: None, volume: volume.data.name.clone(), diff --git a/crates/runtime/src/http/ui/tests/navigation_trees.rs 
b/crates/runtime/src/http/ui/tests/navigation_trees.rs index 190e9a2b2..c0a1c7917 100644 --- a/crates/runtime/src/http/ui/tests/navigation_trees.rs +++ b/crates/runtime/src/http/ui/tests/navigation_trees.rs @@ -25,16 +25,16 @@ use crate::http::ui::tests::common::req; use crate::http::ui::tests::common::{ui_test_op, Entity, Op}; use crate::http::ui::volumes::models::{Volume, VolumeCreatePayload, VolumeCreateResponse}; use crate::http::ui::worksheets::models::{WorksheetCreatePayload, WorksheetResponse}; -use crate::tests::run_icebucket_test_server; +use crate::tests::run_test_server; +use embucket_metastore::VolumeType as MetastoreVolumeType; +use embucket_metastore::{Database as MetastoreDatabase, Volume as MetastoreVolume}; use http::Method; -use icebucket_metastore::IceBucketVolumeType; -use icebucket_metastore::{IceBucketDatabase, IceBucketVolume}; use serde_json::json; #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_databases_navigation() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; let client = reqwest::Client::new(); let url = format!("http://{addr}/ui/navigation-trees"); let res = req(&client, Method::GET, &url, String::new()) @@ -49,9 +49,9 @@ async fn test_ui_databases_navigation() { Op::Create, None, &Entity::Volume(VolumeCreatePayload { - data: Volume::from(IceBucketVolume { + data: Volume::from(MetastoreVolume { ident: String::new(), - volume: IceBucketVolumeType::Memory, + volume: MetastoreVolumeType::Memory, }), }), ) @@ -60,7 +60,7 @@ async fn test_ui_databases_navigation() { // Create database, Ok let expected1 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test1".to_string(), properties: None, volume: volume.data.name.clone(), @@ -68,7 +68,7 @@ async fn test_ui_databases_navigation() { .into(), }; let expected2 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test2".to_string(), properties: None, volume: volume.data.name.clone(), @@ -76,7 +76,7 @@ async fn test_ui_databases_navigation() { .into(), }; let expected3 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test3".to_string(), properties: None, volume: volume.data.name.clone(), @@ -84,7 +84,7 @@ async fn test_ui_databases_navigation() { .into(), }; let expected4 = DatabaseCreatePayload { - data: IceBucketDatabase { + data: MetastoreDatabase { ident: "test4".to_string(), properties: None, volume: volume.data.name.clone(), diff --git a/crates/runtime/src/http/ui/tests/queries.rs b/crates/runtime/src/http/ui/tests/queries.rs index 899805a94..9b36fedae 100644 --- a/crates/runtime/src/http/ui/tests/queries.rs +++ b/crates/runtime/src/http/ui/tests/queries.rs @@ -23,15 +23,15 @@ use crate::http::ui::queries::models::{ }; use crate::http::ui::tests::common::req; use crate::http::ui::worksheets::models::{WorksheetCreatePayload, WorksheetResponse}; -use crate::tests::run_icebucket_test_server; +use crate::tests::run_test_server; +use embucket_history::QueryStatus; use http::Method; -use icebucket_history::QueryStatus; use serde_json::json; #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_queries_no_worksheet() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; let client = reqwest::Client::new(); let res = req( @@ -65,7 +65,7 @@ async fn test_ui_queries_no_worksheet() { #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_queries() { - let addr = run_icebucket_test_server().await; + 
let addr = run_test_server().await; let client = reqwest::Client::new(); let res = req( diff --git a/crates/runtime/src/http/ui/tests/schemas.rs b/crates/runtime/src/http/ui/tests/schemas.rs index f66337b82..7e0d55d26 100644 --- a/crates/runtime/src/http/ui/tests/schemas.rs +++ b/crates/runtime/src/http/ui/tests/schemas.rs @@ -21,15 +21,17 @@ use crate::http::ui::databases::models::{Database, DatabaseCreatePayload}; use crate::http::ui::schemas::models::{SchemaCreatePayload, SchemasResponse}; use crate::http::ui::tests::common::{req, ui_test_op, Entity, Op}; use crate::http::ui::volumes::models::{Volume, VolumeCreatePayload, VolumeCreateResponse}; -use crate::tests::run_icebucket_test_server; +use crate::tests::run_test_server; +use embucket_metastore::{ + Database as MetastoreDatabase, Volume as MetastoreVolume, VolumeType as MetastoreVolumeType, +}; use http::Method; -use icebucket_metastore::{IceBucketDatabase, IceBucketVolume, IceBucketVolumeType}; use serde_json::json; #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_schemas() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; let client = reqwest::Client::new(); // Create volume with empty name @@ -38,9 +40,9 @@ async fn test_ui_schemas() { Op::Create, None, &Entity::Volume(VolumeCreatePayload { - data: Volume::from(IceBucketVolume { + data: Volume::from(MetastoreVolume { ident: String::new(), - volume: IceBucketVolumeType::Memory, + volume: MetastoreVolumeType::Memory, }), }), ) @@ -49,7 +51,7 @@ async fn test_ui_schemas() { let database_name = "test1".to_string(); // Create database, Ok - let expected1 = IceBucketDatabase { + let expected1 = MetastoreDatabase { ident: database_name.clone(), properties: None, volume: volume.data.name.clone(), diff --git a/crates/runtime/src/http/ui/tests/tables.rs b/crates/runtime/src/http/ui/tests/tables.rs index 3db0fcc05..5b376e7e9 100644 --- a/crates/runtime/src/http/ui/tests/tables.rs +++ b/crates/runtime/src/http/ui/tests/tables.rs @@ -26,16 +26,16 @@ use crate::http::ui::tables::models::{ use crate::http::ui::tests::common::{req, ui_test_op, Entity, Op}; use crate::http::ui::volumes::models::{VolumeCreatePayload, VolumeCreateResponse}; use crate::http::ui::worksheets::{WorksheetCreatePayload, WorksheetResponse}; -use crate::tests::run_icebucket_test_server; +use crate::tests::run_test_server; +use embucket_metastore::VolumeType as MetastoreVolumeType; +use embucket_metastore::{Database as MetastoreDatabase, Volume as MetastoreVolume}; use http::Method; -use icebucket_metastore::IceBucketVolumeType; -use icebucket_metastore::{IceBucketDatabase, IceBucketVolume}; use serde_json::json; #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_tables() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; let client = reqwest::Client::new(); // Create volume with empty name @@ -44,9 +44,9 @@ async fn test_ui_tables() { Op::Create, None, &Entity::Volume(VolumeCreatePayload { - data: IceBucketVolume { + data: MetastoreVolume { ident: String::new(), - volume: IceBucketVolumeType::Memory, + volume: MetastoreVolumeType::Memory, } .into(), }), @@ -56,7 +56,7 @@ async fn test_ui_tables() { let database_name = "test1".to_string(); // Create database, Ok - let expected1 = IceBucketDatabase { + let expected1 = MetastoreDatabase { ident: database_name.clone(), properties: None, volume: volume.data.name.clone(), diff --git a/crates/runtime/src/http/ui/tests/volumes.rs b/crates/runtime/src/http/ui/tests/volumes.rs 
index 284e20d04..e2ceffdc2 100644 --- a/crates/runtime/src/http/ui/tests/volumes.rs +++ b/crates/runtime/src/http/ui/tests/volumes.rs @@ -18,24 +18,24 @@ #![allow(clippy::unwrap_used, clippy::expect_used)] use crate::http::ui::tests::common::{ui_test_op, Entity, Op}; use crate::http::ui::volumes::models::{Volume, VolumeCreatePayload, VolumeCreateResponse}; -use crate::tests::run_icebucket_test_server; -use icebucket_metastore::IceBucketVolume; -use icebucket_metastore::{ - AwsAccessKeyCredentials, AwsCredentials, IceBucketFileVolume, IceBucketS3Volume, - IceBucketVolumeType, +use crate::tests::run_test_server; +use embucket_metastore::Volume as MetastoreVolume; +use embucket_metastore::{ + AwsAccessKeyCredentials, AwsCredentials, FileVolume as MetastoreFileVolume, + S3Volume as MetastoreS3Volume, VolumeType as MetastoreVolumeType, }; use serde_json; #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_volumes_memory() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; // memory volume with empty ident create Ok let expected = VolumeCreatePayload { - data: Volume::from(IceBucketVolume { + data: Volume::from(MetastoreVolume { ident: String::new(), - volume: IceBucketVolumeType::Memory, + volume: MetastoreVolumeType::Memory, }), }; let res = ui_test_op(addr, Op::Create, None, &Entity::Volume(expected.clone())).await; @@ -47,7 +47,7 @@ async fn test_ui_volumes_memory() { #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_volumes_file() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; // memory volume with empty ident create Ok let payload = r#"{"name":"","type": "file", "path":"/tmp/data"}"#; @@ -59,9 +59,9 @@ async fn test_ui_volumes_file() { assert_eq!(expected.data, created.data); let expected = VolumeCreatePayload { - data: Volume::from(IceBucketVolume { + data: Volume::from(MetastoreVolume { ident: String::new(), - volume: IceBucketVolumeType::File(IceBucketFileVolume { + volume: MetastoreVolumeType::File(MetastoreFileVolume { path: "/tmp/data".to_string(), }), }), @@ -74,15 +74,15 @@ async fn test_ui_volumes_file() { #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_volumes_s3() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; // memory volume with empty ident create Ok let expected = VolumeCreatePayload { - data: Volume::from(IceBucketVolume { + data: Volume::from(MetastoreVolume { ident: String::new(), - volume: IceBucketVolumeType::S3(IceBucketS3Volume { + volume: MetastoreVolumeType::S3(MetastoreS3Volume { region: Some("us-west-1".to_string()), - bucket: Some("icebucket".to_string()), + bucket: Some("embucket".to_string()), endpoint: Some("http://localhost:9000".to_string()), skip_signature: None, metadata_endpoint: None, diff --git a/crates/runtime/src/http/ui/tests/worksheets.rs b/crates/runtime/src/http/ui/tests/worksheets.rs index 255c5cf5f..1e3326034 100644 --- a/crates/runtime/src/http/ui/tests/worksheets.rs +++ b/crates/runtime/src/http/ui/tests/worksheets.rs @@ -22,14 +22,14 @@ use crate::http::ui::tests::common::req; use crate::http::ui::worksheets::{ WorksheetCreatePayload, WorksheetResponse, WorksheetUpdatePayload, WorksheetsResponse, }; -use crate::tests::run_icebucket_test_server; +use crate::tests::run_test_server; use http::Method; use serde_json::json; #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_worksheets() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; 
let client = reqwest::Client::new(); let res = req( @@ -96,7 +96,7 @@ async fn test_ui_worksheets() { #[tokio::test] #[allow(clippy::too_many_lines)] async fn test_ui_worksheets_ops() { - let addr = run_icebucket_test_server().await; + let addr = run_test_server().await; let client = reqwest::Client::new(); // bad payload, None instead of string diff --git a/crates/runtime/src/http/ui/volumes/error.rs b/crates/runtime/src/http/ui/volumes/error.rs index 1b9c82b1c..9cdcdd9da 100644 --- a/crates/runtime/src/http/ui/volumes/error.rs +++ b/crates/runtime/src/http/ui/volumes/error.rs @@ -19,8 +19,8 @@ use crate::http::error::ErrorResponse; use crate::http::ui::error::IntoStatusCode; use axum::response::IntoResponse; use axum::Json; +use embucket_metastore::error::MetastoreError; use http::StatusCode; -use icebucket_metastore::error::MetastoreError; use snafu::prelude::*; pub type VolumesResult = Result; diff --git a/crates/runtime/src/http/ui/volumes/handlers.rs b/crates/runtime/src/http/ui/volumes/handlers.rs index c15d262d3..10e7dd07e 100644 --- a/crates/runtime/src/http/ui/volumes/handlers.rs +++ b/crates/runtime/src/http/ui/volumes/handlers.rs @@ -30,9 +30,9 @@ use axum::{ extract::{Path, Query, State}, Json, }; -use icebucket_metastore::error::MetastoreError; -use icebucket_metastore::models::IceBucketVolume; -use icebucket_utils::list_config::ListConfig; +use embucket_metastore::error::MetastoreError; +use embucket_metastore::models::Volume as MetastoreVolume; +use embucket_utils::list_config::ListConfig; use utoipa::OpenApi; use validator::Validate; @@ -77,15 +77,15 @@ pub async fn create_volume( State(state): State, Json(volume): Json, ) -> VolumesResult> { - let icebucket_volume: IceBucketVolume = volume.data.into(); - icebucket_volume + let embucket_volume: MetastoreVolume = volume.data.into(); + embucket_volume .validate() .map_err(|e| VolumesAPIError::Create { source: MetastoreError::Validation { source: e }, })?; state .metastore - .create_volume(&icebucket_volume.ident.clone(), icebucket_volume) + .create_volume(&embucket_volume.ident.clone(), embucket_volume) .await .map_err(|e| VolumesAPIError::Create { source: e }) .map(|o| { @@ -175,7 +175,7 @@ pub async fn update_volume( Path(volume_name): Path, Json(volume): Json, ) -> VolumesResult> { - let volume: IceBucketVolume = volume.data.into(); + let volume: MetastoreVolume = volume.data.into(); volume.validate().map_err(|e| VolumesAPIError::Update { source: MetastoreError::Validation { source: e }, })?; diff --git a/crates/runtime/src/http/ui/volumes/models.rs b/crates/runtime/src/http/ui/volumes/models.rs index 2f497f2ba..e70401383 100644 --- a/crates/runtime/src/http/ui/volumes/models.rs +++ b/crates/runtime/src/http/ui/volumes/models.rs @@ -15,10 +15,11 @@ // specific language governing permissions and limitations // under the License. 
-use icebucket_metastore::models::{ - AwsCredentials, IceBucketFileVolume, IceBucketS3Volume, IceBucketVolume, IceBucketVolumeType, +use embucket_metastore::models::{ + AwsCredentials, FileVolume as MetastoreFileVolume, S3Volume as MetastoreS3Volume, + Volume as MetastoreVolume, VolumeType as MetastoreVolumeType, }; -use icebucket_metastore::IceBucketS3TablesVolume; +use embucket_metastore::S3TablesVolume as MetastoreS3TablesVolume; use serde::{Deserialize, Serialize}; use utoipa::{IntoParams, ToSchema}; @@ -63,12 +64,12 @@ pub struct Volume { pub volume: VolumeType, } -impl From for Volume { - fn from(volume: IceBucketVolume) -> Self { +impl From for Volume { + fn from(volume: MetastoreVolume) -> Self { Self { name: volume.ident, volume: match volume.volume { - IceBucketVolumeType::S3(volume) => VolumeType::S3(S3Volume { + MetastoreVolumeType::S3(volume) => VolumeType::S3(S3Volume { region: volume.region, bucket: volume.bucket, endpoint: volume.endpoint, @@ -76,7 +77,7 @@ impl From for Volume { metadata_endpoint: volume.metadata_endpoint, credentials: volume.credentials, }), - IceBucketVolumeType::S3Tables(volume) => VolumeType::S3Tables(S3TablesVolume { + MetastoreVolumeType::S3Tables(volume) => VolumeType::S3Tables(S3TablesVolume { region: volume.region, bucket: volume.bucket, endpoint: volume.endpoint, @@ -84,8 +85,8 @@ impl From for Volume { name: volume.name, arn: volume.arn, }), - IceBucketVolumeType::File(file) => VolumeType::File(FileVolume { path: file.path }), - IceBucketVolumeType::Memory => VolumeType::Memory, + MetastoreVolumeType::File(file) => VolumeType::File(FileVolume { path: file.path }), + MetastoreVolumeType::Memory => VolumeType::Memory, }, } } @@ -93,12 +94,12 @@ impl From for Volume { // TODO: Remove it when found why it can't locate .into() if only From trait implemeted #[allow(clippy::from_over_into)] -impl Into for Volume { - fn into(self) -> IceBucketVolume { - IceBucketVolume { +impl Into for Volume { + fn into(self) -> MetastoreVolume { + MetastoreVolume { ident: self.name, volume: match self.volume { - VolumeType::S3(volume) => IceBucketVolumeType::S3(IceBucketS3Volume { + VolumeType::S3(volume) => MetastoreVolumeType::S3(MetastoreS3Volume { region: volume.region, bucket: volume.bucket, endpoint: volume.endpoint, @@ -107,7 +108,7 @@ impl Into for Volume { credentials: volume.credentials, }), VolumeType::S3Tables(volume) => { - IceBucketVolumeType::S3Tables(IceBucketS3TablesVolume { + MetastoreVolumeType::S3Tables(MetastoreS3TablesVolume { region: volume.region, bucket: volume.bucket, endpoint: volume.endpoint, @@ -117,9 +118,9 @@ impl Into for Volume { }) } VolumeType::File(volume) => { - IceBucketVolumeType::File(IceBucketFileVolume { path: volume.path }) + MetastoreVolumeType::File(MetastoreFileVolume { path: volume.path }) } - VolumeType::Memory => IceBucketVolumeType::Memory, + VolumeType::Memory => MetastoreVolumeType::Memory, }, } } diff --git a/crates/runtime/src/http/ui/worksheets/error.rs b/crates/runtime/src/http/ui/worksheets/error.rs index 0b0cfa414..2a1fd3762 100644 --- a/crates/runtime/src/http/ui/worksheets/error.rs +++ b/crates/runtime/src/http/ui/worksheets/error.rs @@ -19,8 +19,8 @@ use crate::http::error::ErrorResponse; use crate::http::ui::error::IntoStatusCode; use axum::response::IntoResponse; use axum::Json; +use embucket_history::store::WorksheetsStoreError; use http::status::StatusCode; -use icebucket_history::store::WorksheetsStoreError; use snafu::prelude::*; pub type WorksheetsResult = Result; diff --git 
a/crates/runtime/src/http/ui/worksheets/handlers.rs b/crates/runtime/src/http/ui/worksheets/handlers.rs index b09eb69e3..ea898ef91 100644 --- a/crates/runtime/src/http/ui/worksheets/handlers.rs +++ b/crates/runtime/src/http/ui/worksheets/handlers.rs @@ -27,7 +27,7 @@ use axum::{ Json, }; use chrono::Utc; -use icebucket_history::{Worksheet, WorksheetId}; +use embucket_history::{Worksheet, WorksheetId}; use tracing; use utoipa::OpenApi; diff --git a/crates/runtime/src/http/ui/worksheets/models.rs b/crates/runtime/src/http/ui/worksheets/models.rs index a4c8f4ab4..91ef7191b 100644 --- a/crates/runtime/src/http/ui/worksheets/models.rs +++ b/crates/runtime/src/http/ui/worksheets/models.rs @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -use icebucket_history::Worksheet; +use embucket_history::Worksheet; use serde::{Deserialize, Serialize}; use utoipa::ToSchema; diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index cef27e1d6..ec40d7e09 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -17,11 +17,11 @@ use std::sync::Arc; -use config::IceBucketRuntimeConfig; -use http::{make_icebucket_app, run_icebucket_app}; -use icebucket_history::store::SlateDBWorksheetsStore; -use icebucket_metastore::SlateDBMetastore; -use icebucket_utils::Db; +use config::RuntimeConfig; +use embucket_history::store::SlateDBWorksheetsStore; +use embucket_metastore::SlateDBMetastore; +use embucket_utils::Db; +use http::{make_app, run_app}; use object_store::{path::Path, ObjectStore}; use slatedb::{config::DbOptions, db::Db as SlateDb}; @@ -33,9 +33,9 @@ pub mod http; pub(crate) mod tests; #[allow(clippy::unwrap_used, clippy::as_conversions)] -pub async fn run_icebucket( +pub async fn run_binary( state_store: Arc, - config: IceBucketRuntimeConfig, + config: RuntimeConfig, ) -> Result<(), Box> { let db = { let options = DbOptions::default(); @@ -52,6 +52,6 @@ pub async fn run_icebucket( let metastore = Arc::new(SlateDBMetastore::new(db.clone())); let history = Arc::new(SlateDBWorksheetsStore::new(db)); - let app = make_icebucket_app(metastore, history, &config.web)?; - run_icebucket_app(app, &config.web).await + let app = make_app(metastore, history, &config.web)?; + run_app(app, &config.web).await } diff --git a/crates/runtime/src/tests/server.rs b/crates/runtime/src/tests/server.rs index 7d361685b..aef6408e2 100644 --- a/crates/runtime/src/tests/server.rs +++ b/crates/runtime/src/tests/server.rs @@ -15,15 +15,15 @@ // specific language governing permissions and limitations // under the License. 
-use crate::http::{config::IceBucketWebConfig, make_icebucket_app}; -use icebucket_history::store::SlateDBWorksheetsStore; -use icebucket_metastore::SlateDBMetastore; -use icebucket_utils::Db; +use crate::http::{config::WebConfig, make_app}; +use embucket_history::store::SlateDBWorksheetsStore; +use embucket_metastore::SlateDBMetastore; +use embucket_utils::Db; use std::net::SocketAddr; use std::sync::Arc; #[allow(clippy::unwrap_used)] -pub async fn run_icebucket_test_server() -> SocketAddr { +pub async fn run_test_server() -> SocketAddr { let listener = tokio::net::TcpListener::bind("0.0.0.0:0").await.unwrap(); let addr = listener.local_addr().unwrap(); @@ -31,10 +31,10 @@ pub async fn run_icebucket_test_server() -> SocketAddr { let metastore = Arc::new(SlateDBMetastore::new(db.clone())); let history = Arc::new(SlateDBWorksheetsStore::new(db)); - let app = make_icebucket_app( + let app = make_app( metastore, history, - &IceBucketWebConfig { + &WebConfig { port: 3000, host: "0.0.0.0".to_string(), allow_origin: None, diff --git a/crates/runtime/src/tests/session.rs b/crates/runtime/src/tests/session.rs index 79eb8161c..db16259f8 100644 --- a/crates/runtime/src/tests/session.rs +++ b/crates/runtime/src/tests/session.rs @@ -17,12 +17,12 @@ use std::sync::Arc; -use icebucket_metastore::{ - IceBucketDatabase, IceBucketSchema, IceBucketSchemaIdent, IceBucketVolume, Metastore, - SlateDBMetastore, +use embucket_metastore::{ + Database as MetastoreDatabase, Metastore, Schema as MetastoreSchema, + SchemaIdent as MetastoreSchemaIdent, SlateDBMetastore, Volume as MetastoreVolume, }; -use crate::execution::{query::IceBucketQueryContext, session::IceBucketUserSession}; +use crate::execution::{query::QueryContext, session::UserSession}; #[tokio::test] #[allow(clippy::expect_used, clippy::manual_let_else, clippy::too_many_lines)] @@ -31,9 +31,9 @@ async fn test_create_table_and_insert() { metastore .create_volume( &"test_volume".to_string(), - IceBucketVolume::new( + MetastoreVolume::new( "test_volume".to_string(), - icebucket_metastore::IceBucketVolumeType::Memory, + embucket_metastore::VolumeType::Memory, ), ) .await @@ -41,7 +41,7 @@ async fn test_create_table_and_insert() { metastore .create_database( &"benchmark".to_string(), - IceBucketDatabase { + MetastoreDatabase { ident: "benchmark".to_string(), properties: None, volume: "test_volume".to_string(), @@ -49,14 +49,14 @@ async fn test_create_table_and_insert() { ) .await .expect("Failed to create database"); - let schema_ident = IceBucketSchemaIdent { + let schema_ident = MetastoreSchemaIdent { database: "benchmark".to_string(), schema: "public".to_string(), }; metastore .create_schema( &schema_ident.clone(), - IceBucketSchema { + MetastoreSchema { ident: schema_ident, properties: None, }, @@ -64,7 +64,7 @@ async fn test_create_table_and_insert() { .await .expect("Failed to create schema"); let session = Arc::new( - IceBucketUserSession::new(metastore) + UserSession::new(metastore) .await .expect("Failed to create user session"), ); @@ -82,13 +82,13 @@ async fn test_create_table_and_insert() { PRIMARY KEY (CounterID, EventDate, EventTime, WatchID) ); "; - let query1 = session.query(create_query, IceBucketQueryContext::default()); + let query1 = session.query(create_query, QueryContext::default()); let statement = query1.parse_query().expect("Failed to parse query"); let result = query1.execute().await.expect("Failed to execute query"); let all_query = session - .query("SHOW TABLES", IceBucketQueryContext::default()) + .query("SHOW TABLES", 
QueryContext::default()) .execute() .await .expect("Failed to execute query"); @@ -96,7 +96,7 @@ async fn test_create_table_and_insert() { let insert_query = session .query( "INSERT INTO benchmark.public.hits VALUES (1, 1, 'test', 1, 1, 1, 1, 1)", - IceBucketQueryContext::default(), + QueryContext::default(), ) .execute() .await @@ -105,7 +105,7 @@ async fn test_create_table_and_insert() { let select_query = session .query( "SELECT * FROM benchmark.public.hits", - IceBucketQueryContext::default(), + QueryContext::default(), ) .execute() .await diff --git a/crates/runtime/src/tests/snapshots/icebucket_runtime__tests__session__create_table_and_insert.snap b/crates/runtime/src/tests/snapshots/embucket_runtime__tests__session__create_table_and_insert.snap similarity index 99% rename from crates/runtime/src/tests/snapshots/icebucket_runtime__tests__session__create_table_and_insert.snap rename to crates/runtime/src/tests/snapshots/embucket_runtime__tests__session__create_table_and_insert.snap index e29cd6d45..c2b6d9abb 100644 --- a/crates/runtime/src/tests/snapshots/icebucket_runtime__tests__session__create_table_and_insert.snap +++ b/crates/runtime/src/tests/snapshots/embucket_runtime__tests__session__create_table_and_insert.snap @@ -1,7 +1,6 @@ --- source: crates/runtime/src/tests/session.rs expression: "(statement, result, all_query, insert_query, select_query)" -snapshot_kind: text --- ( Statement( diff --git a/crates/runtime/src/tests/utils.rs b/crates/runtime/src/tests/utils.rs index d975d9943..31d3b010e 100644 --- a/crates/runtime/src/tests/utils.rs +++ b/crates/runtime/src/tests/utils.rs @@ -17,46 +17,46 @@ use std::sync::Arc; -use crate::execution::{query::IceBucketQueryContext, session::IceBucketUserSession}; -use icebucket_metastore::{ - IceBucketDatabase, IceBucketSchema, IceBucketSchemaIdent, IceBucketVolume, Metastore, - SlateDBMetastore, +use crate::execution::{query::QueryContext, session::UserSession}; +use embucket_metastore::{ + Database as MetastoreDatabase, Metastore, Schema as MetastoreSchema, + SchemaIdent as MetastoreSchemaIdent, SlateDBMetastore, Volume as MetastoreVolume, }; static TABLE_SETUP: &str = include_str!(r"./queries/table_setup.sql"); #[allow(clippy::unwrap_used, clippy::expect_used)] -pub async fn create_df_session() -> Arc { +pub async fn create_df_session() -> Arc { let metastore = SlateDBMetastore::new_in_memory().await; metastore .create_volume( &"test_volume".to_string(), - IceBucketVolume::new( + MetastoreVolume::new( "test_volume".to_string(), - icebucket_metastore::IceBucketVolumeType::Memory, + embucket_metastore::VolumeType::Memory, ), ) .await .expect("Failed to create volume"); metastore .create_database( - &"icebucket".to_string(), - IceBucketDatabase { - ident: "icebucket".to_string(), + &"embucket".to_string(), + MetastoreDatabase { + ident: "embucket".to_string(), properties: None, volume: "test_volume".to_string(), }, ) .await .expect("Failed to create database"); - let schema_ident = IceBucketSchemaIdent { - database: "icebucket".to_string(), + let schema_ident = MetastoreSchemaIdent { + database: "embucket".to_string(), schema: "public".to_string(), }; metastore .create_schema( &schema_ident.clone(), - IceBucketSchema { + MetastoreSchema { ident: schema_ident, properties: None, }, @@ -65,14 +65,14 @@ pub async fn create_df_session() -> Arc { .expect("Failed to create schema"); let user_session = Arc::new( - IceBucketUserSession::new(metastore) + UserSession::new(metastore) .await .expect("Failed to create user session"), ); for query in 
TABLE_SETUP.split(';') { if !query.is_empty() { - let query = user_session.query(query, IceBucketQueryContext::default()); + let query = user_session.query(query, QueryContext::default()); query.execute().await.unwrap(); //ctx.sql(query).await.unwrap().collect().await.unwrap(); } @@ -88,7 +88,7 @@ pub mod macros { async fn [< query_ $test_fn_name >]() { let ctx = crate::tests::utils::create_df_session().await; - let query = ctx.query($query, crate::execution::query::IceBucketQueryContext::default()); + let query = ctx.query($query, crate::execution::query::QueryContext::default()); let statement = query.parse_query().unwrap(); let plan = query.plan().await; //TODO: add our plan processing also diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 78cb31026..9b9500cbb 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "icebucket_utils" +name = "embucket_utils" version = "0.1.0" edition = "2021" license-file = { workspace = true } diff --git a/crates/utils/src/snapshots/icebucket_utils__test__db.snap b/crates/utils/src/snapshots/embucket_utils__test__db.snap similarity index 100% rename from crates/utils/src/snapshots/icebucket_utils__test__db.snap rename to crates/utils/src/snapshots/embucket_utils__test__db.snap
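
For context on the table-settings hunks earlier in this patch: SnapshotsManagement, AutomaticCompaction, LifecyclePolicies and UserManaged all translate an optional API field into a table commit that either sets or removes the corresponding Iceberg table property. The following is a minimal standalone sketch of that set-or-remove pattern, not part of the patch; the TableIdent, TableUpdate and TableCommit definitions are simplified stand-ins for the real catalog types, and snapshot_age_commit is a hypothetical helper used only for illustration.

```rust
use std::collections::HashMap;

// Simplified stand-ins for the catalog types used by to_commit() in the patch
// (assumptions for this sketch, not the crate's actual API).
#[derive(Clone, Debug)]
struct TableIdent {
    table: String,
}

#[derive(Debug)]
enum TableUpdate {
    RemoveProperties { removals: Vec<String> },
    SetProperties { updates: HashMap<String, String> },
}

#[derive(Debug)]
struct TableCommit {
    ident: TableIdent,
    updates: Vec<TableUpdate>,
}

// `Some` writes the table property, `None` removes it so the engine default
// applies again; this mirrors the shape of the to_commit() methods above.
fn snapshot_age_commit(ident: &TableIdent, max_snapshot_age_ms: Option<i64>) -> TableCommit {
    let mut updates = HashMap::new();
    let mut removals = Vec::new();

    match max_snapshot_age_ms {
        Some(ms) => {
            updates.insert(
                "history.expire.max-snapshot-age-ms".to_string(),
                ms.to_string(),
            );
        }
        None => removals.push("history.expire.max-snapshot-age-ms".to_string()),
    }

    TableCommit {
        ident: ident.clone(),
        updates: vec![
            TableUpdate::RemoveProperties { removals },
            TableUpdate::SetProperties { updates },
        ],
    }
}

fn main() {
    let ident = TableIdent { table: "hits".to_string() };
    // Setting the retention property vs. clearing it back to the default.
    println!("{:?}", snapshot_age_commit(&ident, Some(86_400_000)));
    println!("{:?}", snapshot_age_commit(&ident, None));
}
```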