2 files changed: +6 -3 lines

core/src/main/scala/org/apache/spark/util/Utils.scala

@@ -270,7 +270,8 @@ private[spark] object Utils extends Logging {
   }
 
   /**
-   * Move data to trash if 'spark.sql.truncate.trash.enabled' is true
+   * Move data to trash if 'spark.sql.truncate.trash.enabled' is true; otherwise
+   * delete the data permanently. If moving the data to trash fails, fall back to hard deletion.
    */
   def moveToTrashIfEnabled(
       fs: FileSystem,
@@ -281,7 +282,8 @@ private[spark] object Utils extends Logging {
       logDebug(s"will move data ${partitionPath.toString} to trash")
       val isSuccess = Trash.moveToAppropriateTrash(fs, partitionPath, hadoopConf)
       if (!isSuccess) {
-        logWarning(s"Failed to move data ${partitionPath.toString} to trash")
+        logWarning(s"Failed to move data ${partitionPath.toString} to trash. " +
+          "Falling back to hard deletion")
         return fs.delete(partitionPath, true)
       }
       isSuccess
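
For context, the two hunks above touch only fragments of the helper. Below is a minimal sketch of how the whole method plausibly reads after this change; the isTrashEnabled parameter, the else branch, and the imports are assumptions inferred from the visible lines, and logDebug/logWarning come from the Logging trait that Utils mixes in.

// A minimal sketch, not the verbatim Spark source: everything outside the hunks is assumed.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path, Trash}

def moveToTrashIfEnabled(
    fs: FileSystem,
    partitionPath: Path,
    isTrashEnabled: Boolean,   // assumed flag, derived from 'spark.sql.truncate.trash.enabled'
    hadoopConf: Configuration): Boolean = {
  if (isTrashEnabled) {
    logDebug(s"will move data ${partitionPath.toString} to trash")
    // Hadoop's Trash.moveToAppropriateTrash returns false when the trash is
    // unavailable (e.g. fs.trash.interval <= 0) or the move otherwise fails.
    val isSuccess = Trash.moveToAppropriateTrash(fs, partitionPath, hadoopConf)
    if (!isSuccess) {
      logWarning(s"Failed to move data ${partitionPath.toString} to trash. " +
        "Falling back to hard deletion")
      return fs.delete(partitionPath, true)
    }
    isSuccess
  } else {
    // Trash disabled by configuration: delete the data permanently.
    fs.delete(partitionPath, true)
  }
}
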
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala

@@ -2728,7 +2728,8 @@ object SQLConf {
         "to trash directory or deleted permanently. The trash retention time is controlled by " +
         "fs.trash.interval, and in default, the server side configuration value takes " +
         "precedence over the client-side one. Note that if fs.trash.interval is non-positive, " +
-        "this will be a no-op and log a warning message.")
+        "this will be a no-op and log a warning message. If the data fails to be moved to " +
+        "trash, Spark will delete it permanently instead.")
       .version("3.1.0")
       .booleanConf
       .createWithDefault(false)
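
To show how the flag is intended to be exercised, here is a hypothetical usage sketch (not part of this diff). The table name and the use of an interactive spark session are assumptions; the configuration key and the false default come from the hunk above, and whether files actually land in the trash still depends on fs.trash.interval being positive on the cluster.

// Hypothetical usage from a Spark session (e.g. spark-shell); "demo_table" is an assumed name.
spark.conf.set("spark.sql.truncate.trash.enabled", "true")
spark.sql("TRUNCATE TABLE demo_table")   // data files are moved to the trash if possible

// With the flag left at its default (false), truncation deletes the files permanently.
spark.conf.set("spark.sql.truncate.trash.enabled", "false")
spark.sql("TRUNCATE TABLE demo_table")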