@@ -994,17 +994,12 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper {
             |WHERE t1id * 10 = t3.id3 * 10
           """.stripMargin).queryExecution.executedPlan
         val sortNodes = collect(planned) { case s: SortExec => s }
-        assert(sortNodes.size == 3)
+        assert(sortNodes.size == 4)
         val exchangeNodes = collect(planned) { case e: ShuffleExchangeExec => e }
-        assert(exchangeNodes.size == 3)
+        assert(exchangeNodes.size == 4)

         val projects = collect(planned) { case p: ProjectExec => p }
-        assert(projects.exists(_.outputPartitioning match {
-          case HashPartitioning(Seq(Multiply(ar1: AttributeReference, _, _)), _) =>
-            ar1.name == "t1id"
-          case _ =>
-            false
-        }))
+        assert(!projects.exists(_.outputPartitioning.isInstanceOf[HashPartitioning]))
       }
     }
   }
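For context on what the rewritten assertions probe, here is a minimal, self-contained sketch of walking a physical plan and inspecting each operator's advertised partitioning. It is an illustration, not the suite's fixture: the object name, session settings, table data, and query are assumptions, and it uses plain TreeNode.collect (with AQE disabled) rather than the AdaptiveSparkPlanHelper.collect helper the suite relies on.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.execution.{ProjectExec, SortExec}
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec

object PartitioningProbe extends App {
  // AQE is disabled so plain TreeNode.collect sees the whole plan; the
  // suite's collect helper also descends into adaptive subtrees instead.
  // Broadcast joins are disabled to force a sort-merge join, matching the
  // sort/shuffle counts the test asserts on.
  val spark = SparkSession.builder()
    .master("local[2]")
    .config("spark.sql.adaptive.enabled", "false")
    .config("spark.sql.autoBroadcastJoinThreshold", "-1")
    .getOrCreate()
  import spark.implicits._

  Seq(1L, 2L, 3L).toDF("id").createOrReplaceTempView("t1")
  Seq(10L, 20L, 30L).toDF("id3").createOrReplaceTempView("t3")

  val planned = spark.sql(
    """
      |SELECT t1.id * 10 AS t1id, t3.id3
      |FROM t1 JOIN t3 ON t1.id * 10 = t3.id3 * 10
    """.stripMargin).queryExecution.executedPlan

  val sorts = planned.collect { case s: SortExec => s }
  val shuffles = planned.collect { case e: ShuffleExchangeExec => e }
  // The rewritten assertion above checks the negative case: no ProjectExec
  // reports HashPartitioning, i.e. hash partitioning is no longer
  // propagated through the aliased multiply expression.
  val hashPartitionedProjects = planned.collect { case p: ProjectExec => p }
    .filter(_.outputPartitioning.isInstanceOf[HashPartitioning])

  println(s"sorts=${sorts.size}, shuffles=${shuffles.size}, " +
    s"hash-partitioned projects=${hashPartitionedProjects.size}")
  spark.stop()
}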
@@ -1103,13 +1098,13 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper {
         // t12 is already sorted on `t1.id * 2`, and we need to sort it on `2 * t12.id`
         // for the 2nd join, so sorting on t12 can be avoided
         val sortNodes = planned.collect { case s: SortExec => s }
-        assert(sortNodes.size == 3)
+        assert(sortNodes.size == 4)
         val outputOrdering = planned.outputOrdering
         assert(outputOrdering.size == 1)
         // Sort order should have 3 children, not 4. This is because t1.id * 2 and 2 * t1.id are the same
-        assert(outputOrdering.head.children.size == 3)
+        assert(outputOrdering.head.children.size == 2)
         assert(outputOrdering.head.children.count(_.isInstanceOf[AttributeReference]) == 2)
-        assert(outputOrdering.head.children.count(_.isInstanceOf[Multiply]) == 1)
+        assert(outputOrdering.head.children.count(_.isInstanceOf[Multiply]) == 0)
       }
     }
   }
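The "t1.id * 2 and 2 * t1.id are the same" claim in the comment above rests on Catalyst's expression canonicalization, which reorders the operands of commutative arithmetic before comparing expressions. A minimal sketch of that equivalence (the object name, attribute name, and types are made up for illustration):

import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Literal, Multiply}
import org.apache.spark.sql.types.LongType

object CommutativeEquivalence extends App {
  val id = AttributeReference("id", LongType)()
  val idTimesTwo = Multiply(id, Literal(2L))
  val twoTimesId = Multiply(Literal(2L), id)

  // Structurally the two trees differ...
  assert(idTimesTwo != twoTimesId)
  // ...but they canonicalize to the same form, so a plan already sorted on
  // `id * 2` also satisfies a required ordering on `2 * id`, which is why
  // the planner can drop a redundant SortExec.
  assert(idTimesTwo.semanticEquals(twoTimesId))
  println("id * 2 is semantically equal to 2 * id")
}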