Skip to content

Commit 3ee18ce

Browse files
committed
fix: mark `rows` constructor parameter and `unsafeRows` lazy val in LocalTableScanExec as @transient (SPARK-21884, avoids serializing locally-held rows and the resulting StackOverflowError on metadata-only queries); adds a regression test in OptimizeMetadataOnlyQuerySuite.
1 parent 14054ff commit 3ee18ce

File tree

2 files changed

+10
-2
lines changed

2 files changed

+10
-2
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScanExec.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,12 +28,12 @@ import org.apache.spark.sql.execution.metric.SQLMetrics
2828
*/
2929
case class LocalTableScanExec(
3030
output: Seq[Attribute],
31-
rows: Seq[InternalRow]) extends LeafExecNode {
31+
@transient rows: Seq[InternalRow]) extends LeafExecNode {
3232

3333
override lazy val metrics = Map(
3434
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
3535

36-
private lazy val unsafeRows: Array[InternalRow] = {
36+
@transient private lazy val unsafeRows: Array[InternalRow] = {
3737
if (rows.isEmpty) {
3838
Array.empty
3939
} else {

sql/core/src/test/scala/org/apache/spark/sql/execution/OptimizeMetadataOnlyQuerySuite.scala

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,4 +117,12 @@ class OptimizeMetadataOnlyQuerySuite extends QueryTest with SharedSQLContext {
117117
"select partcol1, max(partcol2) from srcpart where partcol1 = 0 group by rollup (partcol1)",
118118
"select partcol2 from (select partcol2 from srcpart where partcol1 = 0 union all " +
119119
"select partcol2 from srcpart where partcol1 = 1) t group by partcol2")
120+
121+
test("SPARK-21884 Fix StackOverflowError on MetadataOnlyQuery") {
122+
withTable("t_1000") {
123+
sql("CREATE TABLE t_1000 (a INT, p INT) USING PARQUET PARTITIONED BY (p)")
124+
(1 to 1000).foreach(p => sql(s"ALTER TABLE t_1000 ADD PARTITION (p=$p)"))
125+
sql("SELECT COUNT(DISTINCT p) FROM t_1000").collect()
126+
}
127+
}
120128
}

0 commit comments

Comments (0)