-
Notifications
You must be signed in to change notification settings - Fork 430
feat(catalog): adding support for purge_table #2232
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -580,7 +580,7 @@ impl Catalog for HmsCatalog { | |
| /// attempting to drop the table. This includes scenarios where | ||
| /// the table does not exist. | ||
| /// - Any network or communication error occurs with the database backend. | ||
| async fn drop_table(&self, table: &TableIdent) -> Result<()> { | ||
| async fn drop_table_with_purge(&self, table: &TableIdent, _purge: bool) -> Result<()> { | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. HMS should honor the `purge` flag — see Java's implementation: https://github.com/apache/iceberg/blob/8c2ca1d084fca37671ba8b38d59ea3f5a187b147/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java#L244-L251. It looks like the intent is to skip Hive's native purge and instead use FileIO to purge the table data. |
||
| let db_name = validate_namespace(table.namespace())?; | ||
| if !self.namespace_exists(table.namespace()).await? { | ||
| return Err(Error::new( | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -729,7 +729,7 @@ impl Catalog for SqlCatalog { | |
| } | ||
| } | ||
|
|
||
| async fn drop_table(&self, identifier: &TableIdent) -> Result<()> { | ||
| async fn drop_table_with_purge(&self, identifier: &TableIdent, _purge: bool) -> Result<()> { | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm actually not sure whether `SqlCatalog` should support this. |
||
| if !self.table_exists(identifier).await? { | ||
| return no_such_table_err(identifier); | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -319,7 +319,7 @@ impl Catalog for MemoryCatalog { | |
| } | ||
|
|
||
| /// Drop a table from the catalog. | ||
| async fn drop_table(&self, table_ident: &TableIdent) -> Result<()> { | ||
| async fn drop_table_with_purge(&self, table_ident: &TableIdent, _purge: bool) -> Result<()> { | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Memory catalog should use purge |
||
| let mut root_namespace_state = self.root_namespace_state.lock().await; | ||
|
|
||
| root_namespace_state.remove_existing_table(table_ident)?; | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -19,6 +19,7 @@ | |||||||||||||||||||
|
|
||||||||||||||||||||
| pub mod memory; | ||||||||||||||||||||
| mod metadata_location; | ||||||||||||||||||||
| pub mod utils; | ||||||||||||||||||||
|
|
||||||||||||||||||||
| use std::collections::HashMap; | ||||||||||||||||||||
| use std::fmt::{Debug, Display}; | ||||||||||||||||||||
|
|
@@ -95,8 +96,17 @@ pub trait Catalog: Debug + Sync + Send { | |||||||||||||||||||
| /// Load table from the catalog. | ||||||||||||||||||||
| async fn load_table(&self, table: &TableIdent) -> Result<Table>; | ||||||||||||||||||||
|
|
||||||||||||||||||||
| /// Drop a table from the catalog and purge its data, or returns error if it doesn't exist. | ||||||||||||||||||||
| /// | ||||||||||||||||||||
| /// This is equivalent to calling `drop_table_with_purge(table, true)`. | ||||||||||||||||||||
| async fn drop_table(&self, table: &TableIdent) -> Result<()> { | ||||||||||||||||||||
| self.drop_table_with_purge(table, true).await | ||||||||||||||||||||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I know this is a default function, but I'm a bit concerned with forcing purge here. For instance, the Glue catalog's rename leverages `drop_table` internally (see iceberg-rust/crates/catalog/glue/src/catalog.rs, lines 748–756 at commit d15deeb).
This could mean that data would be incorrectly cleaned up. The same issue applies to the DataFusion integration — in that case we wouldn't want to purge on drop either. |
||||||||||||||||||||
| } | ||||||||||||||||||||
|
|
||||||||||||||||||||
| /// Drop a table from the catalog, or returns error if it doesn't exist. | ||||||||||||||||||||
| async fn drop_table(&self, table: &TableIdent) -> Result<()>; | ||||||||||||||||||||
| /// | ||||||||||||||||||||
| /// If `purge` is true, the catalog should also delete the underlying table data. | ||||||||||||||||||||
| async fn drop_table_with_purge(&self, table: &TableIdent, purge: bool) -> Result<()>; | ||||||||||||||||||||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The naming here makes it seem like we are always purging, even though `purge` may be false. |
||||||||||||||||||||
|
|
||||||||||||||||||||
| /// Check if a table exists in the catalog. | ||||||||||||||||||||
| async fn table_exists(&self, table: &TableIdent) -> Result<bool>; | ||||||||||||||||||||
|
|
||||||||||||||||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,151 @@ | ||
| // Licensed to the Apache Software Foundation (ASF) under one | ||
| // or more contributor license agreements. See the NOTICE file | ||
| // distributed with this work for additional information | ||
| // regarding copyright ownership. The ASF licenses this file | ||
| // to you under the Apache License, Version 2.0 (the | ||
| // "License"); you may not use this file except in compliance | ||
| // with the License. You may obtain a copy of the License at | ||
| // | ||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||
| // | ||
| // Unless required by applicable law or agreed to in writing, | ||
| // software distributed under the License is distributed on an | ||
| // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
| // KIND, either express or implied. See the License for the | ||
| // specific language governing permissions and limitations | ||
| // under the License. | ||
|
|
||
| //! Utility functions for catalog operations. | ||
|
|
||
| use std::collections::HashSet; | ||
|
|
||
| use crate::io::FileIO; | ||
| use crate::spec::TableMetadata; | ||
| use crate::Result; | ||
|
|
||
| /// Property key for enabling garbage collection on drop. | ||
| /// When set to `false`, data files will not be deleted when a table is dropped. | ||
| /// Defaults to `true`. | ||
| pub const GC_ENABLED: &str = "gc.enabled"; | ||
| const GC_ENABLED_DEFAULT: bool = true; | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This constant should be moved to `TableProperty`. |
||
|
|
||
| /// Deletes all data and metadata files referenced by the given table metadata. | ||
| /// | ||
| /// This mirrors the Java implementation's `CatalogUtil.dropTableData`. | ||
| /// It collects all manifest files, manifest lists, previous metadata files, | ||
| /// statistics files, and partition statistics files, then deletes them. | ||
| /// | ||
| /// Data files within manifests are only deleted if the `gc.enabled` table | ||
| /// property is `true` (the default), to avoid corrupting other tables that | ||
| /// may share the same data files. | ||
| /// | ||
| /// Individual file deletion failures are suppressed to complete as much | ||
| /// cleanup as possible, matching the Java behavior. | ||
| pub async fn drop_table_data( | ||
| io: &FileIO, | ||
| metadata: &TableMetadata, | ||
| metadata_location: Option<&str>, | ||
| ) -> Result<()> { | ||
| let mut manifest_lists_to_delete: HashSet<String> = HashSet::new(); | ||
| let mut manifests_to_delete: HashSet<String> = HashSet::new(); | ||
|
|
||
| for snapshot in metadata.snapshots() { | ||
| // Collect the manifest list location | ||
| let manifest_list_location = snapshot.manifest_list(); | ||
| if !manifest_list_location.is_empty() { | ||
| manifest_lists_to_delete.insert(manifest_list_location.to_string()); | ||
| } | ||
|
|
||
| // Load all manifests from this snapshot | ||
| match snapshot.load_manifest_list(io, metadata).await { | ||
| Ok(manifest_list) => { | ||
| for manifest_file in manifest_list.entries() { | ||
| manifests_to_delete.insert(manifest_file.manifest_path.clone()); | ||
| } | ||
| } | ||
| Err(_) => { | ||
| // Suppress failure to continue cleanup | ||
| } | ||
| } | ||
| } | ||
|
|
||
| let gc_enabled = metadata | ||
| .properties() | ||
| .get(GC_ENABLED) | ||
| .and_then(|v| v.parse::<bool>().ok()) | ||
| .unwrap_or(GC_ENABLED_DEFAULT); | ||
|
|
||
| // Delete data files only if gc.enabled is true, to avoid corrupting shared tables | ||
| if gc_enabled { | ||
| delete_data_files(io, &manifests_to_delete).await; | ||
| } | ||
|
|
||
| // Delete manifest files | ||
| delete_files(io, manifests_to_delete.iter().map(String::as_str)).await; | ||
|
|
||
| // Delete manifest lists | ||
| delete_files(io, manifest_lists_to_delete.iter().map(String::as_str)).await; | ||
|
|
||
| // Delete previous metadata files | ||
| delete_files( | ||
| io, | ||
| metadata.metadata_log().iter().map(|m| m.metadata_file.as_str()), | ||
| ) | ||
| .await; | ||
|
|
||
| // Delete statistics files | ||
| delete_files( | ||
| io, | ||
| metadata | ||
| .statistics_iter() | ||
| .map(|s| s.statistics_path.as_str()), | ||
| ) | ||
| .await; | ||
|
|
||
| // Delete partition statistics files | ||
| delete_files( | ||
| io, | ||
| metadata | ||
| .partition_statistics_iter() | ||
| .map(|s| s.statistics_path.as_str()), | ||
| ) | ||
| .await; | ||
|
|
||
| // Delete the current metadata file | ||
| if let Some(location) = metadata_location { | ||
| let _ = io.delete(location).await; | ||
| } | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// Reads each manifest and deletes the data files referenced within. | ||
| async fn delete_data_files(io: &FileIO, manifest_paths: &HashSet<String>) { | ||
| for manifest_path in manifest_paths { | ||
| let input = match io.new_input(manifest_path) { | ||
| Ok(input) => input, | ||
| Err(_) => continue, | ||
| }; | ||
|
|
||
| let manifest_content = match input.read().await { | ||
| Ok(content) => content, | ||
| Err(_) => continue, | ||
| }; | ||
|
|
||
| let manifest = match crate::spec::Manifest::parse_avro(&manifest_content) { | ||
| Ok(manifest) => manifest, | ||
| Err(_) => continue, | ||
| }; | ||
|
|
||
| for entry in manifest.entries() { | ||
| let _ = io.delete(entry.data_file.file_path()).await; | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. With all the storage work being done, this looks like a good case for adding batch-delete support. |
||
| } | ||
| } | ||
| } | ||
|
|
||
| /// Deletes a collection of files, suppressing individual failures. | ||
| async fn delete_files<'a>(io: &FileIO, paths: impl Iterator<Item = &'a str>) { | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This should use the `delete_stream` API we are about to add. |
||
| for path in paths { | ||
| let _ = io.delete(path).await; | ||
| } | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The Glue catalog should also honor the `purge` flag.