8 changes: 7 additions & 1 deletion sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -272,7 +272,13 @@ class Dataset[T] private[sql](
   private[sql] def getRows(
       numRows: Int,
       truncate: Int): Seq[Seq[String]] = {
-    val newDf = toDF()
+    val newDf = logicalPlan match {
+      case c: CommandResult =>
+        // Convert to `LocalRelation` and let `ConvertToLocalRelation` do the casting locally to
+        // avoid triggering a job
+        Dataset.ofRows(sparkSession, LocalRelation(c.output, c.rows))
+      case _ => toDF()
+    }
     val castCols = newDf.logicalPlan.output.map { col =>
       // Since binary types in top-level schema fields have a specific format to print,
       // so we do not cast them to strings here.
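The comment in the new branch captures the core idea: a `CommandResult` node already holds its result rows materialized on the driver, so re-wrapping them in a `LocalRelation` lets the `ConvertToLocalRelation` optimizer rule apply the display casts locally instead of scheduling a job. A minimal standalone sketch of the observable effect (a hypothetical demo app, not part of the patch):

import java.util.concurrent.atomic.AtomicInteger

import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql.SparkSession

object ShowCommandDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("demo").getOrCreate()

    // Count every job the scheduler starts.
    val jobs = new AtomicInteger(0)
    spark.sparkContext.addSparkListener(new SparkListener {
      override def onJobStart(jobStart: SparkListenerJobStart): Unit = jobs.incrementAndGet()
    })

    // SHOW TABLES runs eagerly; its result is a CommandResult whose rows are
    // already materialized on the driver, so printing them should need no job.
    spark.sql("SHOW TABLES").show()

    // Listener events arrive asynchronously; pause briefly before reading the
    // counter (the test below instead drains the bus via withListener).
    Thread.sleep(2000)
    println(s"jobs started: ${jobs.get()}") // expected: 0 with this patch
    spark.stop()
  }
}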
19 changes: 19 additions & 0 deletions sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -2474,6 +2474,25 @@ class DatasetSuite extends QueryTest
     )
     assert(result == expected)
   }
+
+  test("SPARK-43124: Show does not trigger job execution on CommandResults") {
+    withSQLConf(SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> "") {
+      withTable("t1") {
+        sql("create table t1(c int) using parquet")
+
+        @volatile var jobCounter = 0
+        val listener = new SparkListener {
+          override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
+            jobCounter += 1
+          }
+        }
+        withListener(spark.sparkContext, listener) { _ =>
+          sql("show tables").show()
+        }
+        assert(jobCounter === 0)
+      }
+    }
+  }
 }
 
 class DatasetLargeResultCollectingSuite extends QueryTest
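One subtlety in the test above: listener callbacks are delivered asynchronously, so asserting on the counter is only sound once pending events have been flushed; `withListener` is expected to take care of that before the assertion runs. A rough sketch of the registration pattern using only public APIs (a hypothetical helper, not Spark's actual test utility):

import org.apache.spark.SparkContext
import org.apache.spark.scheduler.SparkListener

// Hypothetical helper mirroring the shape of the test utility: register the
// listener for the duration of the body, then always unregister it. (The real
// utility also waits for the event bus to drain, which needs internal APIs.)
def withJobListener[L <: SparkListener](sc: SparkContext, listener: L)(body: L => Unit): Unit = {
  sc.addSparkListener(listener)
  try body(listener)
  finally sc.removeSparkListener(listener)
}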