AvroScan.scala

@@ -70,10 +70,6 @@ case class AvroScan(
 
   override def hashCode(): Int = super.hashCode()
 
-  override def description(): String = {
-    super.description() + ", PushedFilters: " + pushedFilters.mkString("[", ", ", "]")
-  }
-
   override def getMetaData(): Map[String, String] = {
     super.getMetaData() ++ Map("PushedFilters" -> seqToString(pushedFilters))
   }
CSVScan.scala

@@ -91,10 +91,6 @@ case class CSVScan(
 
   override def hashCode(): Int = super.hashCode()
 
-  override def description(): String = {
-    super.description() + ", PushedFilters: " + pushedFilters.mkString("[", ", ", "]")
-  }
-
   override def getMetaData(): Map[String, String] = {
     super.getMetaData() ++ Map("PushedFilters" -> seqToString(pushedFilters))
   }
JsonScan.scala

@@ -91,7 +91,7 @@ case class JsonScan(
 
   override def hashCode(): Int = super.hashCode()
 
-  override def description(): String = {
-    super.description() + ", PushedFilters: " + pushedFilters.mkString("[", ", ", "]")
+  override def getMetaData(): Map[String, String] = {
+    super.getMetaData() ++ Map("PushedFilters" -> pushedFilters.mkString("[", ", ", "]"))
   }
 }

Author comment on lines -94 to +95:

> Not sure why JSON was different from the others, but I made it the same by just providing metadata. Updated the related UT below to accommodate.
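
The change across all five scans follows the same pattern: drop the per-scan description() override and let the base class render whatever getMetaData() returns. A minimal, self-contained sketch of the idea (BaseScan and JsonLikeScan are illustrative names, not Spark's actual classes):

// Hypothetical sketch of the description()/getMetaData() split.
abstract class BaseScan {
  // Subclasses contribute key/value metadata instead of overriding description().
  def getMetaData(): Map[String, String] = Map.empty

  // The base class renders the metadata once, in one place, so every
  // scan's explain output uses the same format.
  def description(): String =
    getMetaData().map { case (k, v) => s"$k: $v" }.mkString(", ")
}

class JsonLikeScan(pushedFilters: Seq[String]) extends BaseScan {
  override def getMetaData(): Map[String, String] =
    super.getMetaData() ++ Map("PushedFilters" -> pushedFilters.mkString("[", ", ", "]"))
}

// new JsonLikeScan(Seq("IsNotNull(value)")).description()
// => "PushedFilters: [IsNotNull(value)]"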
OrcScan.scala

@@ -92,12 +92,6 @@ case class OrcScan(
     ("[]", "[]")
   }
 
-  override def description(): String = {
-    super.description() + ", PushedFilters: " + seqToString(pushedFilters) +
-      ", PushedAggregation: " + pushedAggregationsStr +
-      ", PushedGroupBy: " + pushedGroupByStr
-  }
-
   override def getMetaData(): Map[String, String] = {
     super.getMetaData() ++ Map("PushedFilters" -> seqToString(pushedFilters)) ++
       Map("PushedAggregation" -> pushedAggregationsStr) ++
ParquetScan.scala

@@ -127,12 +127,6 @@ case class ParquetScan(
     ("[]", "[]")
   }
 
-  override def description(): String = {
-    super.description() + ", PushedFilters: " + seqToString(pushedFilters) +
-      ", PushedAggregation: " + pushedAggregationsStr +
-      ", PushedGroupBy: " + pushedGroupByStr
-  }
-
   override def getMetaData(): Map[String, String] = {
     super.getMetaData() ++ Map("PushedFilters" -> seqToString(pushedFilters)) ++
       Map("PushedAggregation" -> pushedAggregationsStr) ++
sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala (19 changes: 5 additions & 14 deletions)

@@ -462,27 +462,18 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite
     withTempDir { dir =>
       Seq("parquet", "orc", "csv", "json").foreach { fmt =>
         val basePath = dir.getCanonicalPath + "/" + fmt
-        val pushFilterMaps = Map (
-          "parquet" ->
-            "|PushedFilters: \\[IsNotNull\\(value\\), GreaterThan\\(value,2\\)\\]",
-          "orc" ->
-            "|PushedFilters: \\[IsNotNull\\(value\\), GreaterThan\\(value,2\\)\\]",
-          "csv" ->
-            "|PushedFilters: \\[IsNotNull\\(value\\), GreaterThan\\(value,2\\)\\]",
-          "json" ->
-            "|remove_marker"
-        )
-        val expected_plan_fragment1 =
+        val expectedPlanFragment =
           s"""
              |\\(1\\) BatchScan $fmt file:$basePath
              |Output \\[2\\]: \\[value#x, id#x\\]
              |DataFilters: \\[isnotnull\\(value#x\\), \\(value#x > 2\\)\\]
              |Format: $fmt
              |Location: InMemoryFileIndex\\([0-9]+ paths\\)\\[.*\\]
              |PartitionFilters: \\[isnotnull\\(id#x\\), \\(id#x > 1\\)\\]
-             ${pushFilterMaps.get(fmt).get}
+             |PushedFilters: \\[IsNotNull\\(value\\), GreaterThan\\(value,2\\)\\]
              |ReadSchema: struct\\<value:int\\>
-             |""".stripMargin.replaceAll("\nremove_marker", "").trim
+             |""".stripMargin.trim
 
         spark.range(10)
           .select(col("id"), col("id").as("value"))

@@ -500,7 +491,7 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite
           .format(fmt)
           .load(basePath).where($"id" > 1 && $"value" > 2)
         val normalizedOutput = getNormalizedExplain(df, FormattedMode)
-        assert(expected_plan_fragment1.r.findAllMatchIn(normalizedOutput).length == 1)
+        assert(expectedPlanFragment.r.findAllMatchIn(normalizedOutput).length == 1)
       }
     }
   }
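
The unified expected fragment works because every built-in file source now reports PushedFilters through metadata, which FormattedMode prints uniformly. A hedged, stand-alone way to see this outside the test harness (the /tmp/demo path is made up):

// Write a small Parquet table, read it back with a filter, and print the
// formatted plan; the BatchScan node should show a "PushedFilters: [...]" line.
spark.range(10).selectExpr("id", "id AS value")
  .write.mode("overwrite").parquet("/tmp/demo")

spark.read.parquet("/tmp/demo")
  .where("value > 2")
  .explain("formatted")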
FileSourceAggregatePushDownSuite.scala

@@ -347,7 +347,8 @@ trait FileSourceAggregatePushDownSuite
     spark.read.format(format).load(file.getCanonicalPath).createOrReplaceTempView("test")
     Seq("false", "true").foreach { enableVectorizedReader =>
       withSQLConf(aggPushDownEnabledKey -> "true",
-        vectorizedReaderEnabledKey -> enableVectorizedReader) {
+        vectorizedReaderEnabledKey -> enableVectorizedReader,
+        SQLConf.MAX_METADATA_STRING_LENGTH.key -> "1000") {
         val testMinWithAllTypes = sql("SELECT min(StringCol), min(BooleanCol), min(ByteCol), " +
           "min(BinaryCol), min(ShortCol), min(IntegerCol), min(LongCol), min(FloatCol), " +

Author comment:

> The overridden description() part wasn't limited in characters, but the metadata version is, so just bump up the max size to make sure the full string can be found.
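
For context on the comment above: metadata strings in explain output are truncated to spark.sql.maxMetadataStringLength characters (100 by default), while the old description() override was not truncated, so a long PushedFilters list could be cut off before the substring the test looks for. A hedged one-liner showing the same knob outside withSQLConf:

// Raise the truncation limit so long metadata entries (e.g. a large
// PushedFilters or PushedAggregation list) survive intact in explain output.
spark.conf.set("spark.sql.maxMetadataStringLength", "1000")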