
Commit 203f869

[Uniform] Refactor ExpireSnapshotHelper to follow Iceberg's ExpireSnapshots API (delta-io#4338)
#### Which Delta project/connector is this regarding?

- [ ] Spark
- [ ] Standalone
- [ ] Flink
- [ ] Kernel
- [x] Other (uniform)

## Description

This PR makes no functional change. It refactors ExpireSnapshotHelper to follow Iceberg's ExpireSnapshots API and moves the custom file-cleanup logic out of the method that creates ExpireSnapshotHelper, into its caller.

https://github.com/apache/iceberg/blob/main/api/src/main/java/org/apache/iceberg/ExpireSnapshots.java#L40

## How was this patch tested?

Existing tests.

## Does this PR introduce _any_ user-facing changes?

No
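For context, the Iceberg `ExpireSnapshots` API linked above is a fluent builder: configuration methods such as `cleanExpiredFiles(boolean)` and `deleteWith(Consumer<String>)` return the instance itself, and nothing is applied until `commit()`. Below is a minimal, self-contained sketch of that contract and of the delegation pattern the helper adopts in this PR. The `ExpireSnapshots` trait is a stand-in for the real Iceberg interface, `StubExpireSnapshots` is purely hypothetical, and placing `commit()` directly on the helper is a simplification (in the PR it comes from `TransactionHelper`).

```scala
import java.util.function.Consumer

// Stand-in for the relevant slice of Iceberg's org.apache.iceberg.ExpireSnapshots.
trait ExpireSnapshots {
  def cleanExpiredFiles(clean: Boolean): ExpireSnapshots
  def deleteWith(deleteFunc: Consumer[String]): ExpireSnapshots
  def commit(): Unit
}

// Hypothetical stub so the sketch runs without the shaded Iceberg jar.
class StubExpireSnapshots extends ExpireSnapshots {
  private var clean = true
  private var deleteFunc: Consumer[String] = p => println(s"default delete: $p")
  def cleanExpiredFiles(c: Boolean): ExpireSnapshots = { clean = c; this }
  def deleteWith(f: Consumer[String]): ExpireSnapshots = { deleteFunc = f; this }
  def commit(): Unit = if (clean) deleteFunc.accept("metadata/snap-123.avro")
}

// The delegation pattern from the PR: each helper method forwards to the
// wrapped ExpireSnapshots and returns the helper, keeping calls chainable.
class ExpireSnapshotHelper(expireSnapshot: ExpireSnapshots) {
  def cleanExpiredFiles(clean: Boolean): ExpireSnapshotHelper = {
    expireSnapshot.cleanExpiredFiles(clean)
    this
  }
  def deleteWith(f: Consumer[String]): ExpireSnapshotHelper = {
    expireSnapshot.deleteWith(f)
    this
  }
  def commit(): Unit = expireSnapshot.commit()
}

// Chained configuration, then a single commit: the ExpireSnapshots contract.
new ExpireSnapshotHelper(new StubExpireSnapshots)
  .cleanExpiredFiles(true)
  .deleteWith(p => println(s"custom delete: $p"))
  .commit()
```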
1 parent bb93957 commit 203f869

2 files changed (+32 lines, -21 lines)

iceberg/src/main/scala/org/apache/spark/sql/delta/icebergShaded/IcebergConversionTransaction.scala

Lines changed: 12 additions & 20 deletions
```diff
@@ -17,6 +17,7 @@
 package org.apache.spark.sql.delta.icebergShaded
 
 import java.util.ConcurrentModificationException
+import java.util.function.Consumer
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable.ArrayBuffer
@@ -203,6 +204,16 @@ class IcebergConversionTransaction(
   class ExpireSnapshotHelper(expireSnapshot: ExpireSnapshots)
     extends TransactionHelper(expireSnapshot) {
 
+    def cleanExpiredFiles(clean: Boolean): ExpireSnapshotHelper = {
+      expireSnapshot.cleanExpiredFiles(clean)
+      this
+    }
+
+    def deleteWith(newDeleteFunc: Consumer[String]): ExpireSnapshotHelper = {
+      expireSnapshot.deleteWith(newDeleteFunc)
+      this
+    }
+
     override def opType: String = "expireSnapshot"
   }
 
@@ -272,26 +283,7 @@ class IcebergConversionTransaction(
   }
 
   def getExpireSnapshotHelper(): ExpireSnapshotHelper = {
-    val table = txn.table()
-    val tableLocation = LocationUtil.stripTrailingSlash(table.location)
-    val defaultWriteMetadataLocation = s"$tableLocation/metadata"
-    val writeMetadataLocation = LocationUtil.stripTrailingSlash(
-      table.properties().getOrDefault(
-        TableProperties.WRITE_METADATA_LOCATION, defaultWriteMetadataLocation))
-    val expireSnapshots = if (tablePath.toString == writeMetadataLocation) {
-      // Don't attempt any file cleanup in the edge-case configuration
-      // that the data location (in Uniform the table root location)
-      // is the same as the Iceberg metadata location
-      txn.expireSnapshots().cleanExpiredFiles(false)
-    } else {
-      txn.expireSnapshots().deleteWith(path => {
-        if (path.startsWith(writeMetadataLocation)) {
-          table.io().deleteFile(path)
-        }
-      })
-    }
-
-    val ret = new ExpireSnapshotHelper(expireSnapshots)
+    val ret = new ExpireSnapshotHelper(txn.expireSnapshots())
     fileUpdates += ret
     ret
   }
```
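The metadata-location resolution deleted above does not disappear; it moves to the caller in IcebergConverter (next file). A standalone sketch of that resolution logic follows, with `LocationUtil.stripTrailingSlash` inlined as a plain function and the Iceberg table property key `write.metadata.path` (the value of `TableProperties.WRITE_METADATA_LOCATION`) written out literally so the example runs without the shaded jar; the sample paths are invented.

```scala
// Inlined stand-in for shaded Iceberg's LocationUtil.stripTrailingSlash.
def stripTrailingSlash(path: String): String =
  if (path.endsWith("/")) path.dropRight(1) else path

// Resolve where Iceberg writes metadata: the write.metadata.path table
// property if set, else "<table root>/metadata" (mirrors the moved logic).
def resolveWriteMetadataLocation(
    tableLocation: String,
    tableProperties: Map[String, String]): String = {
  val defaultWriteMetadataLocation =
    s"${stripTrailingSlash(tableLocation)}/metadata"
  stripTrailingSlash(
    tableProperties.getOrElse("write.metadata.path", defaultWriteMetadataLocation))
}

// Without an override, metadata lives under the table root...
assert(resolveWriteMetadataLocation("s3://bucket/tbl/", Map.empty)
  == "s3://bucket/tbl/metadata")
// ...and an explicit write.metadata.path property takes precedence.
assert(resolveWriteMetadataLocation("s3://bucket/tbl",
  Map("write.metadata.path" -> "s3://bucket/meta/")) == "s3://bucket/meta")
```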

iceberg/src/main/scala/org/apache/spark/sql/delta/icebergShaded/IcebergConverter.scala

Lines changed: 20 additions & 1 deletion
```diff
@@ -32,9 +32,10 @@ import org.apache.spark.sql.delta.metering.DeltaLogging
 import org.apache.spark.sql.delta.sources.DeltaSQLConf
 import org.apache.commons.lang3.exception.ExceptionUtils
 import org.apache.hadoop.fs.Path
-import shadedForDelta.org.apache.iceberg.{Table => IcebergTable}
+import shadedForDelta.org.apache.iceberg.{Table => IcebergTable, TableProperties}
 import shadedForDelta.org.apache.iceberg.exceptions.CommitFailedException
 import shadedForDelta.org.apache.iceberg.hive.{HiveCatalog, HiveTableOperations}
+import shadedForDelta.org.apache.iceberg.util.LocationUtil
 
 import org.apache.spark.internal.MDC
 import org.apache.spark.sql.SparkSession
@@ -418,6 +419,24 @@ class IcebergConverter(spark: SparkSession)
         log"[path = ${MDC(DeltaLogKeys.PATH, log.logPath)}] tableId=" +
         log"${MDC(DeltaLogKeys.TABLE_ID, log.tableId)}]")
       val expireSnapshotHelper = icebergTxn.getExpireSnapshotHelper()
+      val table = icebergTxn.txn.table()
+      val tableLocation = LocationUtil.stripTrailingSlash(table.location)
+      val defaultWriteMetadataLocation = s"$tableLocation/metadata"
+      val writeMetadataLocation = LocationUtil.stripTrailingSlash(
+        table.properties().getOrDefault(
+          TableProperties.WRITE_METADATA_LOCATION, defaultWriteMetadataLocation))
+      if (snapshotToConvert.path.toString == writeMetadataLocation) {
+        // Don't attempt any file cleanup in the edge-case configuration
+        // that the data location (in Uniform the table root location)
+        // is the same as the Iceberg metadata location
+        expireSnapshotHelper.cleanExpiredFiles(false)
+      } else {
+        expireSnapshotHelper.deleteWith(path => {
+          if (path.startsWith(writeMetadataLocation)) {
+            table.io().deleteFile(path)
+          }
+        })
+      }
       expireSnapshotHelper.commit()
     }
 
```
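The branch added above either disables file cleanup entirely (when the Uniform table root doubles as the Iceberg metadata location) or installs a delete callback that only touches files under the metadata location, so data files are never deleted. A small runnable sketch of that guard, with `table.io().deleteFile` replaced by a println and all paths invented:

```scala
import java.util.function.Consumer

val writeMetadataLocation = "s3://bucket/tbl/metadata" // hypothetical value

// Guarded callback: delete only paths under the Iceberg metadata location.
val deleteFunc: Consumer[String] = path =>
  if (path.startsWith(writeMetadataLocation)) {
    println(s"deleting $path") // table.io().deleteFile(path) in the real code
  }

deleteFunc.accept("s3://bucket/tbl/metadata/snap-1.avro") // deleted
deleteFunc.accept("s3://bucket/tbl/part-00000.parquet")   // ignored: data file
```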
