Skip to content

Commit 8af4579

Browse files
committed
fix test failure
1 parent ef45539 commit 8af4579

File tree

1 file changed

+6
-8
lines changed

1 file changed

+6
-8
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatDataWriter.scala

Lines changed: 6 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -73,7 +73,6 @@ abstract class FileFormatDataWriter(
7373
*/
7474
def commit(): WriteTaskResult = {
7575
releaseResources()
76-
committer.commitTask(taskAttemptContext)
7776
val summary = ExecutedWriteSummary(
7877
updatedPartitions = updatedPartitions.toSet,
7978
stats = statsTrackers.map(_.getFinalStats()))
@@ -108,6 +107,9 @@ class SingleDirectoryDataWriter(
108107
newOutputWriter()
109108

110109
private def newOutputWriter(): Unit = {
110+
recordsInFile = 0
111+
releaseResources()
112+
111113
val ext = description.outputWriterFactory.getFileExtension(taskAttemptContext)
112114
val currentPath = committer.newTaskTempFile(
113115
taskAttemptContext,
@@ -128,8 +130,6 @@ class SingleDirectoryDataWriter(
128130
assert(fileCounter < MAX_FILE_COUNTER,
129131
s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")
130132

131-
recordsInFile = 0
132-
releaseResources()
133133
newOutputWriter()
134134
}
135135

@@ -208,6 +208,9 @@ class DynamicPartitionDataWriter(
208208
* @param bucketId the bucket which all tuples being written by this `OutputWriter` belong to
209209
*/
210210
private def newOutputWriter(partitionValues: Option[InternalRow], bucketId: Option[Int]): Unit = {
211+
recordsInFile = 0
212+
releaseResources()
213+
211214
val partDir = partitionValues.map(getPartitionPath(_))
212215
partDir.foreach(updatedPartitions.add)
213216

@@ -249,21 +252,16 @@ class DynamicPartitionDataWriter(
249252
statsTrackers.foreach(_.newBucket(currentBucketId.get))
250253
}
251254

252-
recordsInFile = 0
253255
fileCounter = 0
254-
255-
releaseResources()
256256
newOutputWriter(currentPartionValues, currentBucketId)
257257
} else if (description.maxRecordsPerFile > 0 &&
258258
recordsInFile >= description.maxRecordsPerFile) {
259259
// Exceeded the threshold in terms of the number of records per file.
260260
// Create a new file by increasing the file counter.
261-
recordsInFile = 0
262261
fileCounter += 1
263262
assert(fileCounter < MAX_FILE_COUNTER,
264263
s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")
265264

266-
releaseResources()
267265
newOutputWriter(currentPartionValues, currentBucketId)
268266
}
269267
val outputRow = getOutputRow(record)

0 commit comments

Comments
 (0)