Skip to content

Commit 7040dc9

Browse files
author
Wesley Tang
committed
[SPARK-16664][SQL] Fix persist call on DataFrames with more than 200 columns wiping out the data.
1 parent e10b874 commit 7040dc9

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -127,7 +127,7 @@ object GenerateColumnAccessor extends CodeGenerator[Seq[DataType], ColumnarItera
127127
val groupedAccessorsItr = initializeAccessors.grouped(numberOfStatementsThreshold)
128128
val groupedExtractorsItr = extractors.grouped(numberOfStatementsThreshold)
129129
var groupedAccessorsLength = 0
130-
groupedAccessorsItr.zipWithIndex.map { case (body, i) =>
130+
groupedAccessorsItr.zipWithIndex.foreach { case (body, i) =>
131131
groupedAccessorsLength += 1
132132
val funcName = s"accessors$i"
133133
val funcCode = s"""
@@ -137,7 +137,7 @@ object GenerateColumnAccessor extends CodeGenerator[Seq[DataType], ColumnarItera
137137
""".stripMargin
138138
ctx.addNewFunction(funcName, funcCode)
139139
}
140-
groupedExtractorsItr.zipWithIndex.map { case (body, i) =>
140+
groupedExtractorsItr.zipWithIndex.foreach { case (body, i) =>
141141
val funcName = s"extractors$i"
142142
val funcCode = s"""
143143
|private void $funcName() {

0 commit comments

Comments
 (0)