Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion crates/catalog/glue/src/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -642,7 +642,7 @@ impl Catalog for GlueCatalog {
/// attempting to drop the table. This includes scenarios where
/// the table does not exist.
/// - Any network or communication error occurs with the database backend.
async fn drop_table(&self, table: &TableIdent) -> Result<()> {
async fn drop_table_with_purge(&self, table: &TableIdent, _purge: bool) -> Result<()> {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

glue should use purge

let db_name = validate_namespace(table.namespace())?;
let table_name = table.name();

Expand Down
2 changes: 1 addition & 1 deletion crates/catalog/hms/src/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -580,7 +580,7 @@ impl Catalog for HmsCatalog {
/// attempting to drop the table. This includes scenarios where
/// the table does not exist.
/// - Any network or communication error occurs with the database backend.
async fn drop_table(&self, table: &TableIdent) -> Result<()> {
async fn drop_table_with_purge(&self, table: &TableIdent, _purge: bool) -> Result<()> {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hms should use purge, looking at java's impl: https://github.com/apache/iceberg/blob/8c2ca1d084fca37671ba8b38d59ea3f5a187b147/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java#L244-L251

It looks like we intend to skip Hive's native purge and instead use FileIO to purge the table data ourselves.

let db_name = validate_namespace(table.namespace())?;
if !self.namespace_exists(table.namespace()).await? {
return Err(Error::new(
Expand Down
13 changes: 9 additions & 4 deletions crates/catalog/rest/src/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -827,13 +827,18 @@ impl Catalog for RestCatalog {
}

/// Drop a table from the catalog.
async fn drop_table(&self, table: &TableIdent) -> Result<()> {
async fn drop_table_with_purge(&self, table: &TableIdent, purge: bool) -> Result<()> {
let context = self.context().await?;

let request = context
let mut request_builder = context
.client
.request(Method::DELETE, context.config.table_endpoint(table))
.build()?;
.request(Method::DELETE, context.config.table_endpoint(table));

if purge {
request_builder = request_builder.query(&[("purgeRequested", "true")]);
}

let request = request_builder.build()?;

let http_response = context.client.query_catalog(request).await?;

Expand Down
2 changes: 1 addition & 1 deletion crates/catalog/s3tables/src/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -570,7 +570,7 @@ impl Catalog for S3TablesCatalog {
/// This function can return an error in the following situations:
/// - Errors from the underlying database deletion process, converted using
/// `from_aws_sdk_error`.
async fn drop_table(&self, table: &TableIdent) -> Result<()> {
async fn drop_table_with_purge(&self, table: &TableIdent, _purge: bool) -> Result<()> {
let req = self
.s3tables_client
.delete_table()
Expand Down
2 changes: 1 addition & 1 deletion crates/catalog/sql/src/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -729,7 +729,7 @@ impl Catalog for SqlCatalog {
}
}

async fn drop_table(&self, identifier: &TableIdent) -> Result<()> {
async fn drop_table_with_purge(&self, identifier: &TableIdent, _purge: bool) -> Result<()> {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm actually not sure if sqlCatalog should support this

if !self.table_exists(identifier).await? {
return no_such_table_err(identifier);
}
Expand Down
2 changes: 1 addition & 1 deletion crates/iceberg/src/catalog/memory/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ impl Catalog for MemoryCatalog {
}

/// Drop a table from the catalog.
async fn drop_table(&self, table_ident: &TableIdent) -> Result<()> {
async fn drop_table_with_purge(&self, table_ident: &TableIdent, _purge: bool) -> Result<()> {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Memory catalog should use purge

let mut root_namespace_state = self.root_namespace_state.lock().await;

root_namespace_state.remove_existing_table(table_ident)?;
Expand Down
12 changes: 11 additions & 1 deletion crates/iceberg/src/catalog/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

pub mod memory;
mod metadata_location;
pub mod utils;

use std::collections::HashMap;
use std::fmt::{Debug, Display};
Expand Down Expand Up @@ -95,8 +96,17 @@ pub trait Catalog: Debug + Sync + Send {
/// Load table from the catalog.
async fn load_table(&self, table: &TableIdent) -> Result<Table>;

/// Drop a table from the catalog and purge its data, or returns error if it doesn't exist.
///
/// This is equivalent to calling `drop_table_with_purge(table, true)`.
async fn drop_table(&self, table: &TableIdent) -> Result<()> {
self.drop_table_with_purge(table, true).await
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I know this is a default function, but I'm a bit concerned with forcing purge here. For instance, in the glue catalog rename leverages a drop_table() call.

let drop_src_table_result = self.drop_table(src).await;
match drop_src_table_result {
Ok(_) => Ok(()),
Err(_) => {
let err_msg_src_table =
format!("Failed to drop old table {src_db_name}.{src_table_name}.");
let drop_dest_table_result = self.drop_table(dest).await;

This could mean that data would be incorrectly cleaned up. Same issue applies to the DataFusion integration right in this case we wouldn't want to purge on drop table (it looks like they haven't added a purge clause yet). Either way we need to either expose purge higher and force it to false or fix all these call sites (catalog, and engine level)

}

/// Drop a table from the catalog, or returns error if it doesn't exist.
async fn drop_table(&self, table: &TableIdent) -> Result<()>;
///
/// If `purge` is true, the catalog should also delete the underlying table data.
async fn drop_table_with_purge(&self, table: &TableIdent, purge: bool) -> Result<()>;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the naming here makes it seem like we are always purging


/// Check if a table exists in the catalog.
async fn table_exists(&self, table: &TableIdent) -> Result<bool>;
Expand Down
151 changes: 151 additions & 0 deletions crates/iceberg/src/catalog/utils.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Utility functions for catalog operations.

use std::collections::HashSet;

use crate::io::FileIO;
use crate::spec::TableMetadata;
use crate::Result;

/// Property key for enabling garbage collection on drop.
/// When set to `false`, data files will not be deleted when a table is dropped.
/// Defaults to `true`.
///
/// NOTE(review): consider moving this key next to the other table-property
/// constants (e.g. a shared `TableProperties` module) — TODO confirm location.
pub const GC_ENABLED: &str = "gc.enabled";
// Default used when the table does not set `gc.enabled` or the value fails to parse.
const GC_ENABLED_DEFAULT: bool = true;
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should be moved to TableProperty


/// Deletes all data and metadata files referenced by the given table metadata.
///
/// This mirrors the Java implementation's `CatalogUtil.dropTableData`.
/// It collects all manifest files, manifest lists, previous metadata files,
/// statistics files, and partition statistics files, then deletes them.
///
/// Data files within manifests are only deleted if the `gc.enabled` table
/// property is `true` (the default), to avoid corrupting other tables that
/// may share the same data files.
///
/// Individual file deletion failures are suppressed to complete as much
/// cleanup as possible, matching the Java behavior.
///
/// # Arguments
/// * `io` - the `FileIO` used to read manifests and delete files.
/// * `metadata` - the table metadata describing all files to remove.
/// * `metadata_location` - the current metadata file location, deleted last
///   if provided.
///
/// NOTE(review): even when `gc.enabled` is false, manifests, manifest lists,
/// and metadata files are still deleted below — only data files are spared.
/// Confirm this matches the intended semantics for tables sharing metadata.
pub async fn drop_table_data(
    io: &FileIO,
    metadata: &TableMetadata,
    metadata_location: Option<&str>,
) -> Result<()> {
    // Sets deduplicate paths in case snapshots share manifests/lists.
    let mut manifest_lists_to_delete: HashSet<String> = HashSet::new();
    let mut manifests_to_delete: HashSet<String> = HashSet::new();

    for snapshot in metadata.snapshots() {
        // Collect the manifest list location
        let manifest_list_location = snapshot.manifest_list();
        if !manifest_list_location.is_empty() {
            manifest_lists_to_delete.insert(manifest_list_location.to_string());
        }

        // Load all manifests from this snapshot
        match snapshot.load_manifest_list(io, metadata).await {
            Ok(manifest_list) => {
                for manifest_file in manifest_list.entries() {
                    manifests_to_delete.insert(manifest_file.manifest_path.clone());
                }
            }
            Err(_) => {
                // Suppress failure to continue cleanup
            }
        }
    }

    // Malformed/missing values fall back to the default (true).
    let gc_enabled = metadata
        .properties()
        .get(GC_ENABLED)
        .and_then(|v| v.parse::<bool>().ok())
        .unwrap_or(GC_ENABLED_DEFAULT);

    // Delete data files only if gc.enabled is true, to avoid corrupting shared tables
    if gc_enabled {
        delete_data_files(io, &manifests_to_delete).await;
    }

    // Delete manifest files (after their data files, so a partial failure
    // still leaves manifests readable for a retry).
    delete_files(io, manifests_to_delete.iter().map(String::as_str)).await;

    // Delete manifest lists
    delete_files(io, manifest_lists_to_delete.iter().map(String::as_str)).await;

    // Delete previous metadata files
    delete_files(
        io,
        metadata.metadata_log().iter().map(|m| m.metadata_file.as_str()),
    )
    .await;

    // Delete statistics files
    delete_files(
        io,
        metadata
            .statistics_iter()
            .map(|s| s.statistics_path.as_str()),
    )
    .await;

    // Delete partition statistics files
    delete_files(
        io,
        metadata
            .partition_statistics_iter()
            .map(|s| s.statistics_path.as_str()),
    )
    .await;

    // Delete the current metadata file last; failure is deliberately ignored.
    if let Some(location) = metadata_location {
        let _ = io.delete(location).await;
    }

    Ok(())
}

/// Reads each manifest file and deletes every data file it references.
///
/// Cleanup is best-effort: a manifest that cannot be opened, read, or parsed
/// is simply skipped, and individual data-file deletion failures are ignored,
/// so as much cleanup as possible is performed.
async fn delete_data_files(io: &FileIO, manifest_paths: &HashSet<String>) {
    for manifest_path in manifest_paths {
        // Guard clauses: bail out of this manifest on any failure and move on.
        let Ok(input) = io.new_input(manifest_path) else {
            continue;
        };
        let Ok(manifest_content) = input.read().await else {
            continue;
        };
        let Ok(manifest) = crate::spec::Manifest::parse_avro(&manifest_content) else {
            continue;
        };

        for entry in manifest.entries() {
            // TODO(review): a good candidate for batch delete once the
            // storage batch/stream delete support lands.
            let _ = io.delete(entry.data_file.file_path()).await;
        }
    }
}

/// Deletes a collection of files, suppressing individual failures.
async fn delete_files<'a>(io: &FileIO, paths: impl Iterator<Item = &'a str>) {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should use delete_stream api we are about to add

for path in paths {
let _ = io.delete(path).await;
}
}
Loading