diff --git a/docs/sql-data-sources-parquet.md b/docs/sql-data-sources-parquet.md
index dcd2936518465..5532bf9fa3f66 100644
--- a/docs/sql-data-sources-parquet.md
+++ b/docs/sql-data-sources-parquet.md
@@ -295,18 +295,6 @@ Configuration of Parquet can be done using the `setConf` method on `SparkSession
-<tr>
-  <td><code>spark.sql.optimizer.metadataOnly</code></td>
-  <td>true</td>
-  <td>
-    When true, enable the metadata-only query optimization that use the table's metadata to
-    produce the partition columns instead of table scans. It applies when all the columns scanned
-    are partition columns and the query has an aggregate operator that satisfies distinct
-    semantics.
-  </td>
-</tr>
 <tr>
   <td><code>spark.sql.parquet.writeLegacyFormat</code></td>
   <td>false</td>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 6b301c3c9cb5b..da595e7a352db 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -582,12 +582,14 @@ object SQLConf {
     .createWithDefault(HiveCaseSensitiveInferenceMode.INFER_AND_SAVE.toString)
 
   val OPTIMIZER_METADATA_ONLY = buildConf("spark.sql.optimizer.metadataOnly")
+    .internal()
     .doc("When true, enable the metadata-only query optimization that use the table's metadata " +
       "to produce the partition columns instead of table scans. It applies when all the columns " +
       "scanned are partition columns and the query has an aggregate operator that satisfies " +
-      "distinct semantics.")
+      "distinct semantics. By default the optimization is disabled, since it may return " +
+      "incorrect results when the files are empty.")
     .booleanConf
-    .createWithDefault(true)
+    .createWithDefault(false)
 
   val COLUMN_NAME_OF_CORRUPT_RECORD = buildConf("spark.sql.columnNameOfCorruptRecord")
     .doc("The name of internal column for storing raw/un-parsed JSON and CSV records that fail " +
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/OptimizeMetadataOnlyQuery.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/OptimizeMetadataOnlyQuery.scala
index 3ca03ab2939aa..45e5f415e8da1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/OptimizeMetadataOnlyQuery.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/OptimizeMetadataOnlyQuery.scala
@@ -72,6 +72,11 @@ case class OptimizeMetadataOnlyQuery(catalog: SessionCatalog) extends Rule[Logic
             })
           }
           if (isAllDistinctAgg) {
+            logWarning(
+              s"Since configuration `${SQLConf.OPTIMIZER_METADATA_ONLY.key}` is enabled, " +
+                "Spark will scan partition-level metadata without scanning data files. " +
+                "This could result in wrong results when the partition metadata exists but the " +
+                "corresponding data files are empty.")
             a.withNewChildren(Seq(replaceTableScanWithPartitionMetadata(child, rel, filters)))
           } else {
             a
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 806f0b2239fe6..b8c4d73f1b2b4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -2966,6 +2966,43 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
       }
     }
   }
+
+  test("SPARK-26709: OptimizeMetadataOnlyQuery does not handle empty records correctly") {
+    Seq(true, false).foreach { enableOptimizeMetadataOnlyQuery =>
+      withSQLConf(SQLConf.OPTIMIZER_METADATA_ONLY.key -> enableOptimizeMetadataOnlyQuery.toString) {
+        withTable("t") {
+          sql("CREATE TABLE t (col1 INT, p1 INT) USING PARQUET PARTITIONED BY (p1)")
+ sql("INSERT INTO TABLE t PARTITION (p1 = 5) SELECT ID FROM range(1, 1)")
+ if (enableOptimizeMetadataOnlyQuery) {
+ // The result is wrong if we enable the configuration.
+ checkAnswer(sql("SELECT MAX(p1) FROM t"), Row(5))
+ } else {
+ checkAnswer(sql("SELECT MAX(p1) FROM t"), Row(null))
+ }
+ checkAnswer(sql("SELECT MAX(col1) FROM t"), Row(null))
+ }
+
+        withTempPath { path =>
+          val tabLocation = path.getCanonicalPath
+          val partLocation1 = tabLocation + "/p=3"
+          val partLocation2 = tabLocation + "/p=1"
+          // SPARK-23271: saving an empty DataFrame still writes a metadata-only Parquet file,
+          // so the directory p=3 exists but holds no records.
+          val df = spark.emptyDataFrame.select(lit(1).as("col"))
+          df.write.parquet(partLocation1)
+          val df2 = spark.range(10).toDF("col")
+          df2.write.parquet(partLocation2)
+          val readDF = spark.read.parquet(tabLocation)
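+          // With the metadata-only optimization enabled, max(p) is answered from the
+          // partition directory names alone, so the empty p=3 partition wrongly wins.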
+          if (enableOptimizeMetadataOnlyQuery) {
+            // The result is wrong if we enable the configuration.
+            checkAnswer(readDF.selectExpr("max(p)"), Row(3))
+          } else {
+            checkAnswer(readDF.selectExpr("max(p)"), Row(1))
+          }
+          checkAnswer(readDF.selectExpr("max(col)"), Row(9))
+        }
+      }
+    }
+  }
}
case class Foo(bar: Option[String])
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 70efad103d13e..d506edc0cf088 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -2330,4 +2330,22 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
     }
   }
+ test("SPARK-26709: OptimizeMetadataOnlyQuery does not handle empty records correctly") {
+ Seq(true, false).foreach { enableOptimizeMetadataOnlyQuery =>
+ withSQLConf(SQLConf.OPTIMIZER_METADATA_ONLY.key -> enableOptimizeMetadataOnlyQuery.toString) {
+ withTable("t") {
+ sql("CREATE TABLE t (col1 INT, p1 INT) USING PARQUET PARTITIONED BY (p1)")
+ sql("INSERT INTO TABLE t PARTITION (p1 = 5) SELECT ID FROM range(1, 1)")
+ if (enableOptimizeMetadataOnlyQuery) {
+ // The result is wrong if we enable the configuration.
+ checkAnswer(sql("SELECT MAX(p1) FROM t"), Row(5))
+ } else {
+ checkAnswer(sql("SELECT MAX(p1) FROM t"), Row(null))
+ }
+ checkAnswer(sql("SELECT MAX(col1) FROM t"), Row(null))
+ }
+ }
+ }
+ }
+
}