File tree (3 files changed: +112 −71 lines)
core/src/main/scala/org/apache/spark/sql
hive/src/test/scala/org/apache/spark/sql/sources
@@ -151,8 +151,8 @@ private[sql] class FSBasedParquetRelation(
151151
152152 this .paths.toSet == that.paths.toSet &&
153153 schemaEquality &&
154- this .maybeDataSchema == that.maybeDataSchema
155- this .maybePartitionSpec == that.maybePartitionSpec
154+ this .maybeDataSchema == that.maybeDataSchema &&
155+ this .partitionColumns == that.partitionColumns
156156
157157 case _ => false
158158 }
@@ -214,7 +214,7 @@ private[sql] class FSBasedParquetRelation(
214214 }
215215
216216 ParquetOutputFormat .setWriteSupportClass(job, writeSupportClass)
217- RowWriteSupport .setSchema(dataSchema.asNullable. toAttributes, conf)
217+ RowWriteSupport .setSchema(dataSchema.toAttributes, conf)
218218
219219 // Sets compression scheme
220220 conf.set(
@@ -271,7 +271,7 @@ private[sql] class FSBasedParquetRelation(
271271
272272 // TODO Stop using `FilteringParquetRowInputFormat` and overriding `getPartition`.
273273 // After upgrading to Parquet 1.6.0, we should be able to stop caching `FileStatus` objects and
274- // footers. Especially when a global arbitratve schema (either from metastore or data source
274+ // footers. Especially when a global arbitrative schema (either from metastore or data source
275275 // DDL) is available.
276276 new NewHadoopRDD (
277277 sqlContext.sparkContext,
@@ -337,7 +337,7 @@ private[sql] abstract class BaseWriterContainer(
337337 }
338338
339339 def abortJob (): Unit = {
340- // outputCommitter.abortJob(jobContext, JobStatus.State.FAILED)
340+ outputCommitter.abortJob(jobContext, JobStatus .State .FAILED )
341341 logError(s " Job $jobId aborted. " )
342342 }
343343}
You can’t perform that action at this time.
0 commit comments