2 files changed (+21 −2)

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources

@@ -539,6 +539,25 @@ object PartitioningUtils {
     }).asNullable
   }
 
+  def requestedPartitionColumnIds(
+      requiredSchema: StructType,
+      partitionSchema: StructType,
+      caseSensitive: Boolean): Array[Int] = {
+    val columnNameMap =
+      partitionSchema.fields.map(getColName(_, caseSensitive)).zipWithIndex.toMap
+    requiredSchema.fields.map { field =>
+      columnNameMap.getOrElse(getColName(field, caseSensitive), -1)
+    }
+  }
+
+  private def getColName(f: StructField, caseSensitive: Boolean): String = {
+    if (caseSensitive) {
+      f.name
+    } else {
+      f.name.toLowerCase(Locale.ROOT)
+    }
+  }
+
   private def columnNameEquality(caseSensitive: Boolean): (String, String) => Boolean = {
     if (caseSensitive) {
       org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
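
For illustration, a minimal, self-contained sketch of the new helper (the demo object and the schemas below are hypothetical, not part of the patch). It maps each field of the required schema to its index in the partition schema, or -1 for plain data columns, honoring case sensitivity:

import java.util.Locale
import org.apache.spark.sql.types._

object RequestedPartitionColumnIdsDemo extends App {
  // Standalone copy of the helper added above, for demonstration only.
  private def getColName(f: StructField, caseSensitive: Boolean): String =
    if (caseSensitive) f.name else f.name.toLowerCase(Locale.ROOT)

  def requestedPartitionColumnIds(
      requiredSchema: StructType,
      partitionSchema: StructType,
      caseSensitive: Boolean): Array[Int] = {
    val columnNameMap =
      partitionSchema.fields.map(getColName(_, caseSensitive)).zipWithIndex.toMap
    requiredSchema.fields.map { field =>
      columnNameMap.getOrElse(getColName(field, caseSensitive), -1)
    }
  }

  // "id" and "value" are data columns; "dt" is the partition column,
  // requested here with a different case ("DT").
  val requiredSchema = StructType(Seq(
    StructField("id", IntegerType),
    StructField("DT", StringType),
    StructField("value", DoubleType)))
  val partitionSchema = StructType(Seq(StructField("dt", StringType)))

  // Case-insensitive: "DT" resolves to partition column 0 => -1, 0, -1
  println(requestedPartitionColumnIds(requiredSchema, partitionSchema,
    caseSensitive = false).mkString(", "))
  // Case-sensitive: "DT" no longer matches "dt" => -1, -1, -1
  println(requestedPartitionColumnIds(requiredSchema, partitionSchema,
    caseSensitive = true).mkString(", "))
}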

@@ -207,8 +207,8 @@ class OrcFileFormat
     val iter = new RecordReaderIterator(batchReader)
     Option(TaskContext.get()).foreach(_.addTaskCompletionListener[Unit](_ => iter.close()))
     val requestedDataColIds = requestedColIds ++ Array.fill(partitionSchema.length)(-1)
-    val requestedPartitionColIds =
-      Array.fill(requiredSchema.length)(-1) ++ Range(0, partitionSchema.length)
+    val requestedPartitionColIds = PartitioningUtils.requestedPartitionColumnIds(
+      resultSchema, partitionSchema, isCaseSensitive)
     batchReader.initialize(fileSplit, taskAttemptContext)
     batchReader.initBatch(
       reader.getSchema,
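
For context, a hedged sketch of the behavioral difference (the schemas and values below are hypothetical): the removed lines built the partition-id array positionally, assuming the partition columns are exactly the trailing entries of the result schema, while the new helper resolves each column of resultSchema against the partition schema by name, with case sensitivity applied:

import java.util.Locale
import org.apache.spark.sql.types._

object OrcPartitionColIdsSketch extends App {
  // Hypothetical result schema [id, value, dt], with "dt" the only partition column.
  val requiredSchema = StructType(Seq(
    StructField("id", IntegerType),
    StructField("value", DoubleType)))
  val partitionSchema = StructType(Seq(StructField("dt", StringType)))
  val resultSchema = StructType(requiredSchema.fields ++ partitionSchema.fields)

  // Old computation: purely positional.
  val oldIds = Array.fill(requiredSchema.length)(-1) ++ Range(0, partitionSchema.length)

  // New computation, inlined from the helper above for a case-insensitive run:
  // resolve each result column against the partition schema by lower-cased name.
  val nameToId = partitionSchema.fields
    .map(_.name.toLowerCase(Locale.ROOT)).zipWithIndex.toMap
  val newIds = resultSchema.fields
    .map(f => nameToId.getOrElse(f.name.toLowerCase(Locale.ROOT), -1))

  println(oldIds.mkString(", "))  // -1, -1, 0
  println(newIds.mkString(", "))  // -1, -1, 0: identical for this simple shape;
  // the name-based lookup additionally stays correct when column names differ
  // only in case or when the trailing-position assumption does not hold.
}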