@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.analysis.UnsupportedOperationChecker
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, ReturnAnswer}
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
-import org.apache.spark.sql.execution.command.{DescribeTableCommand, ExecutedCommandExec, ShowTablesCommand}
+import org.apache.spark.sql.execution.command.{DescribeTableCommand, ExecutedCommandExec, InsertTableCommand, ShowTablesCommand}
 import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReuseExchange}
 import org.apache.spark.sql.types.{BinaryType, DateType, DecimalType, TimestampType, _}
 import org.apache.spark.util.Utils
@@ -114,7 +114,7 @@ class QueryExecution(val sparkSession: SparkSession, val logical: LogicalPlan) {
 
 
   /**
-   * Returns the result as a hive compatible sequence of strings. This is for testing only.
+   * Returns the result as a hive compatible sequence of strings.
    */
   def hiveResultString(): Seq[String] = executedPlan match {
     case ExecutedCommandExec(desc: DescribeTableCommand) =>
@@ -130,12 +130,17 @@ class QueryExecution(val sparkSession: SparkSession, val logical: LogicalPlan) {
     // SHOW TABLES in Hive only output table names, while ours output database, table name, isTemp.
     case command @ ExecutedCommandExec(s: ShowTablesCommand) if !s.isExtended =>
       command.executeCollect().map(_.getString(1))
+    case insertCommand @ ExecutedCommandExec(_: InsertTableCommand) =>
+      // Insert commands start a new execution of their own through FileFormatWriter.
+      insertCommand.executeCollect().map(_.toString)
     case other =>
-      val result: Seq[Seq[Any]] = other.executeCollectPublic().map(_.toSeq).toSeq
-      // We need the types so we can output struct field names
-      val types = analyzed.output.map(_.dataType)
-      // Reformat to match hive tab delimited output.
-      result.map(_.zip(types).map(toHiveString)).map(_.mkString("\t"))
+      SQLExecution.withNewExecutionId(sparkSession, this) {
+        val result: Seq[Seq[Any]] = other.executeCollectPublic().map(_.toSeq).toSeq
+        // We need the types so we can output struct field names
+        val types = analyzed.output.map(_.dataType)
+        // Reformat to match hive tab delimited output.
+        result.map(_.zip(types).map(toHiveString)).map(_.mkString("\t"))
+      }
   }
 
   /** Formats a datum (based on the given data type) and returns the string representation. */
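
To make the intent of the last hunk concrete, here is a small, self-contained sketch of the mechanism it relies on. The real API is SQLExecution.withNewExecutionId(sparkSession, queryExecution)(body), which sets the "spark.sql.execution.id" local property on the calling thread, posts execution start/end events to listeners, and rejects a nested execution on the same thread. ToyExecution and Demo below are hypothetical names for a simplified model of that behavior, not Spark's implementation; it also illustrates why the InsertTableCommand case is kept outside the wrapper.

import java.util.concurrent.atomic.AtomicLong

// Simplified stand-in for org.apache.spark.sql.execution.SQLExecution.
object ToyExecution {
  private val nextId = new AtomicLong(0)

  // Models the "spark.sql.execution.id" local property that Spark sets
  // on the calling thread via SparkContext.setLocalProperty.
  private val currentId = new ThreadLocal[Option[Long]] {
    override def initialValue(): Option[Long] = None
  }

  def withNewExecutionId[T](body: => T): T = {
    // Spark rejects nested executions on one thread; this mirrors that check.
    require(currentId.get().isEmpty, "execution id is already set on this thread")
    val id = nextId.incrementAndGet()
    currentId.set(Some(id))
    println(s"execution $id started") // Spark posts SparkListenerSQLExecutionStart here
    try body
    finally {
      currentId.set(None)
      println(s"execution $id finished") // ...and SparkListenerSQLExecutionEnd here
    }
  }
}

object Demo extends App {
  // An ordinary query: hiveResultString can safely open a fresh execution,
  // so the collected rows are attributed to a query in the SQL UI/listeners.
  val rows = ToyExecution.withNewExecutionId(Seq("1\ta", "2\tb"))
  rows.foreach(println)

  // Why inserts are special-cased: if the command's write path opens its own
  // execution (as the patch comment says FileFormatWriter does), wrapping it
  // again would trip the nesting check.
  try ToyExecution.withNewExecutionId(ToyExecution.withNewExecutionId(()))
  catch { case e: IllegalArgumentException => println(s"rejected: ${e.getMessage}") }
}

This split explains the shape of the patch: plain queries gain an execution id so their collection is visible to listeners, while insert commands pass through untouched because they already produce one.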