@@ -144,6 +144,7 @@ class Analyzer(
   }
 
   def executeAndCheck(plan: LogicalPlan, tracker: QueryPlanningTracker): LogicalPlan = {
+    if (plan.analyzed) return plan
     AnalysisHelper.markInAnalyzer {
       val analyzed = executeAndTrack(plan, tracker)
       try {
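
The added guard means a plan tree whose `analyzed` flag is already set passes through `executeAndCheck` unchanged. A minimal sketch of that intended behaviour (not part of this change), assuming a local `SparkSession` named `spark`; it relies on internal, unstable APIs purely for illustration:

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.QueryPlanningTracker

val spark = SparkSession.builder().master("local[2]").appName("analyzed-guard").getOrCreate()

// `queryExecution.analyzed` has already been through the analyzer, so CheckAnalysis
// has set its `analyzed` flag.
val analyzedPlan = spark.range(10).selectExpr("id + 1 AS x").queryExecution.analyzed

// With the early return above, re-submitting the tree is a no-op: the very same
// instance comes back instead of being re-analyzed.
val result = spark.sessionState.analyzer.executeAndCheck(analyzedPlan, new QueryPlanningTracker)
assert(result eq analyzedPlan)
```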
@@ -46,7 +46,7 @@ trait AnalysisHelper extends QueryPlan[LogicalPlan] { self: LogicalPlan =>
    * Recursively marks all nodes in this plan tree as analyzed.
    * This should only be called by [[CheckAnalysis]].
    */
-  private[catalyst] def setAnalyzed(): Unit = {
+  private[sql] def setAnalyzed(): Unit = {
     if (!_analyzed) {
       _analyzed = true
       children.foreach(_.setAnalyzed())
@@ -180,6 +180,11 @@ trait AnalysisHelper extends QueryPlan[LogicalPlan] { self: LogicalPlan =>
     super.transformAllExpressions(rule)
   }
 
+  override def clone(): LogicalPlan = {
+    val cloned = super.clone()
+    if (analyzed) cloned.setAnalyzed()
+    cloned
+  }
 }
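
The `clone()` override carries the `analyzed` flag over to the copy, so a cloned tree is no longer treated as un-analyzed downstream. A small sketch of the effect, reusing the hypothetical `spark` session from the snippet above:

```scala
val plan = spark.sql("SELECT 1 + 1 AS a").queryExecution.analyzed
assert(plan.analyzed)

// Previously the copy came back with analyzed = false and could be re-analyzed
// later in the pipeline; with this override the flag survives the clone.
val cloned = plan.clone()
assert(cloned.analyzed)
```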

This file was deleted.

@@ -79,7 +79,12 @@ class QueryExecution(
   lazy val optimizedPlan: LogicalPlan = executePhase(QueryPlanningTracker.OPTIMIZATION) {
     // clone the plan to avoid sharing the plan instance between different stages like analyzing,
     // optimizing and planning.
-    sparkSession.sessionState.optimizer.executeAndTrack(withCachedData.clone(), tracker)
+    val plan = sparkSession.sessionState.optimizer.executeAndTrack(withCachedData.clone(), tracker)
+    // We do not want optimized plans to be re-analyzed, as literals that have been constant folded
+    // (and similar rewrites) can cause issues during analysis. While `clone` should maintain the
+    // `analyzed` state of the LogicalPlan, we also mark the plan as analyzed here, to be safe.
+    plan.setAnalyzed()
+    plan
   }
 
   private def assertOptimized(): Unit = optimizedPlan
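
With `setAnalyzed()` called on the optimizer's output, the optimized plan reports `analyzed = true`, so wrapping it in a fresh `Dataset` later (as the V1 write path below does) should not push constant-folded literals back through analysis. A quick hedged illustration, again assuming the `spark` session from the earlier sketches:

```scala
// The optimizer folds `1 + 1` into a literal; the resulting plan is now explicitly
// marked as analyzed, so it will not be sent through the analyzer a second time.
val optimized = spark.range(5).selectExpr("id + (1 + 1) AS y").queryExecution.optimizedPlan
assert(optimized.analyzed)
```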
@@ -20,12 +20,13 @@ package org.apache.spark.sql.execution.datasources.v2
 import java.util.UUID
 
 import org.apache.spark.SparkException
+import org.apache.spark.sql.Dataset
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.Attribute
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.connector.catalog.SupportsWrite
 import org.apache.spark.sql.connector.write.{LogicalWriteInfoImpl, SupportsOverwrite, SupportsTruncate, V1WriteBuilder, WriteBuilder}
-import org.apache.spark.sql.execution.{AlreadyOptimized, SparkPlan}
+import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.sources.{AlwaysTrue, Filter, InsertableRelation}
 import org.apache.spark.sql.util.CaseInsensitiveStringMap
 
@@ -113,8 +114,7 @@ trait SupportsV1Write extends SparkPlan {
   def plan: LogicalPlan
 
   protected def writeWithV1(relation: InsertableRelation): Seq[InternalRow] = {
-    // The `plan` is already optimized, we should not analyze and optimize it again.
-    relation.insert(AlreadyOptimized.dataFrame(sqlContext.sparkSession, plan), overwrite = false)
+    relation.insert(Dataset.ofRows(sqlContext.sparkSession, plan), overwrite = false)
     Nil
   }
 }
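
Since the plan reaching `writeWithV1` is already optimized and now keeps its `analyzed` flag, the plain `Dataset.ofRows` wrapper suffices and the dedicated `AlreadyOptimized` shim can go away. A minimal sketch of that wrapper call (object and method names here are hypothetical); `Dataset.ofRows` is `private[sql]`, so this only compiles from code living under the `org.apache.spark.sql` package:

```scala
package org.apache.spark.sql.execution.datasources.v2

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

object V1WriteSketch {
  // Illustrative helper (not part of the diff): wraps an already optimized and
  // analyzed plan in a DataFrame; the analyzer's early-return guard means this
  // does not re-analyze or re-optimize the plan.
  def toDataFrame(session: SparkSession, plan: LogicalPlan): DataFrame =
    Dataset.ofRows(session, plan)
}
```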

This file was deleted.