@@ -232,7 +232,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {

     runCommand(df.sparkSession, "save") {
       SaveIntoDataSourceCommand(
-        query = df.queryExecution.analyzed,
+        query = df.logicalPlan,
         provider = source,
         partitionColumns = partitioningColumns.getOrElse(Nil),
         options = extraOptions.toMap,
@@ -284,7 +284,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
       InsertIntoTable(
         table = UnresolvedRelation(tableIdent),
         partition = Map.empty[String, Option[String]],
-        query = df.queryExecution.analyzed,
+        query = df.logicalPlan,
         overwrite = mode == SaveMode.Overwrite,
         ifPartitionNotExists = false)
     }
@@ -370,7 +370,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {

       case (true, SaveMode.Overwrite) =>
         // Get all input data source or hive relations of the query.
-        val srcRelations = df.queryExecution.analyzed.collect {
+        val srcRelations = df.logicalPlan.collect {
           case LogicalRelation(src: BaseRelation, _, _) => src
           case relation: CatalogRelation if DDLUtils.isHiveTable(relation.tableMeta) =>
             relation.tableMeta.identifier
@@ -417,8 +417,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
       partitionColumnNames = partitioningColumns.getOrElse(Nil),
       bucketSpec = getBucketSpec)

-    runCommand(df.sparkSession, "saveAsTable")(CreateTable(tableDesc, mode,
-      Some(df.queryExecution.analyzed)))
+    runCommand(df.sparkSession, "saveAsTable")(CreateTable(tableDesc, mode, Some(df.logicalPlan)))
   }

   /**
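
For context, a minimal usage sketch of the write paths this diff reroutes through df.logicalPlan (hypothetical output path and table name; plain public Spark API, not part of the change). In Spark's Dataset, logicalPlan is derived from queryExecution.analyzed but additionally handles eagerly-executed command plans, which is presumably why the writer commands are switched to it; treat that rationale as an assumption, since the diff itself does not state it.

import org.apache.spark.sql.SparkSession

object WriterLogicalPlanExample {
  def main(args: Array[String]): Unit = {
    // Local session for illustration only.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("writer-logical-plan")
      .getOrCreate()
    import spark.implicits._

    val df = Seq((1, "a"), (2, "b")).toDF("id", "value")

    // Exercises the `save` hunk: the writer now captures df.logicalPlan
    // (rather than df.queryExecution.analyzed) as SaveIntoDataSourceCommand's query.
    df.write.format("json").mode("overwrite").save("/tmp/writer-logical-plan-example")

    // Exercises the `saveAsTable` hunk: CreateTable(tableDesc, mode, Some(df.logicalPlan)).
    df.write.mode("overwrite").saveAsTable("writer_logical_plan_example")

    spark.stop()
  }
}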