Commit f4482ca

Minor bug fix and more tests
1 parent: ec9950c · commit: f4482ca

3 files changed (+112, -71 lines)


sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala

Lines changed: 4 additions & 4 deletions
@@ -151,8 +151,8 @@ private[sql] class FSBasedParquetRelation(
 
       this.paths.toSet == that.paths.toSet &&
         schemaEquality &&
-        this.maybeDataSchema == that.maybeDataSchema
-        this.maybePartitionSpec == that.maybePartitionSpec
+        this.maybeDataSchema == that.maybeDataSchema &&
+        this.partitionColumns == that.partitionColumns
 
     case _ => false
   }
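
The hunk above is the "minor bug fix" from the commit message. In the old code the final comparison was not chained with `&&`, so it stood as a separate statement; since a Scala block evaluates to its last expression, that lone `maybePartitionSpec` comparison became the entire result of `equals`, and the path and schema checks above it were computed and discarded. The fix chains the comparisons and also switches the compared field to `partitionColumns`. A minimal standalone sketch of the pattern (hypothetical values, not Spark code):

    // Illustration only: a block's value is its last expression, so a comparison
    // that is not chained with `&&` silently replaces the whole result.
    object EqualsChainDemo extends App {
      val (a, b, c, d) = (1, 2, 3, 3)

      val broken = {
        a == b &&
          a == b   // evaluated, then discarded: it is not the last expression
        c == d     // only this comparison is the block's value
      }

      val fixed = {
        a == b &&
          a == b &&
          c == d
      }

      println(s"broken = $broken, fixed = $fixed")   // broken = true, fixed = false
    }
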
@@ -214,7 +214,7 @@ private[sql] class FSBasedParquetRelation(
         }
 
       ParquetOutputFormat.setWriteSupportClass(job, writeSupportClass)
-      RowWriteSupport.setSchema(dataSchema.asNullable.toAttributes, conf)
+      RowWriteSupport.setSchema(dataSchema.toAttributes, conf)
 
       // Sets compression scheme
       conf.set(
@@ -271,7 +271,7 @@ private[sql] class FSBasedParquetRelation(
 
     // TODO Stop using `FilteringParquetRowInputFormat` and overriding `getPartition`.
     // After upgrading to Parquet 1.6.0, we should be able to stop caching `FileStatus` objects and
-    // footers. Especially when a global arbitratve schema (either from metastore or data source
+    // footers. Especially when a global arbitrative schema (either from metastore or data source
     // DDL) is available.
     new NewHadoopRDD(
       sqlContext.sparkContext,

sql/core/src/main/scala/org/apache/spark/sql/sources/commands.scala

Lines changed: 1 addition & 1 deletion
@@ -337,7 +337,7 @@ private[sql] abstract class BaseWriterContainer(
   }
 
   def abortJob(): Unit = {
-    // outputCommitter.abortJob(jobContext, JobStatus.State.FAILED)
+    outputCommitter.abortJob(jobContext, JobStatus.State.FAILED)
     logError(s"Job $jobId aborted.")
   }
 }
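
Uncommenting this call restores the Hadoop OutputCommitter contract for failed jobs: the committer is told to abort so it can clean up temporary task output, rather than the failure only being logged. A rough sketch of where the call sits in the setup/commit/abort lifecycle (a hypothetical helper, not Spark's actual BaseWriterContainer):

    import org.apache.hadoop.mapreduce.{JobContext, JobStatus, OutputCommitter}

    // Hypothetical driver-side helper honouring the committer lifecycle.
    def runWriteJob(committer: OutputCommitter, jobContext: JobContext)(body: => Unit): Unit = {
      committer.setupJob(jobContext)
      try {
        body
        committer.commitJob(jobContext)   // publish task output on success
      } catch {
        case e: Throwable =>
          // Without this, temporary output left by tasks would not be cleaned up.
          committer.abortJob(jobContext, JobStatus.State.FAILED)
          throw e
      }
    }
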
