[SPARK-21977] SinglePartition optimizations break certain Streaming Stateful Aggregation requirements #19196
Changes from all commits: b7aeed6, 4a7d124, 00fa592, 090044c, 2f94951, 12cf02a, c5b7f23, e178d10, 6cc8c46, be39125, f34fc8a, 505af43, d0e9094, 8a6eafe, 4eb7f4f
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.errors._
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.codegen.{GenerateUnsafeProjection, Predicate}
 import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark
-import org.apache.spark.sql.catalyst.plans.physical.{ClusteredDistribution, Distribution, Partitioning}
+import org.apache.spark.sql.catalyst.plans.physical.{AllTuples, ClusteredDistribution, Distribution, Partitioning}
 import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
 import org.apache.spark.sql.execution._
 import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
@@ -200,18 +200,35 @@ case class StateStoreRestoreExec( | |
| sqlContext.sessionState, | ||
| Some(sqlContext.streams.stateStoreCoordinator)) { case (store, iter) => | ||
| val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output) | ||
| iter.flatMap { row => | ||
| val key = getKey(row) | ||
| val savedState = store.get(key) | ||
| numOutputRows += 1 | ||
| row +: Option(savedState).toSeq | ||
| val hasInput = iter.hasNext | ||
| if (!hasInput && keyExpressions.isEmpty) { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. add docs on why we are doing this. similar to the docs in other places related to batch aggregation.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. there wasn't any docs in batch :) |
||
| // If our `keyExpressions` are empty, we're getting a global aggregation. In that case | ||
| // the `HashAggregateExec` will output a 0 value for the partial merge. We need to | ||
| // restore the value, so that we don't overwrite our state with a 0 value, but rather | ||
| // merge the 0 with existing state. | ||
| store.iterator().map(_.value) | ||
| } else { | ||
| iter.flatMap { row => | ||
| val key = getKey(row) | ||
| val savedState = store.get(key) | ||
| numOutputRows += 1 | ||
| row +: Option(savedState).toSeq | ||
| } | ||
| } | ||
| } | ||
| } | ||
|
|
||
| override def output: Seq[Attribute] = child.output | ||
|
|
||
| override def outputPartitioning: Partitioning = child.outputPartitioning | ||
|
|
||
| override def requiredChildDistribution: Seq[Distribution] = { | ||
| if (keyExpressions.isEmpty) { | ||
| AllTuples :: Nil | ||
| } else { | ||
| ClusteredDistribution(keyExpressions) :: Nil | ||
| } | ||
| } | ||
| } | ||
|
|
||
| /** | ||
|
|
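For context on the new restore branch: empty `keyExpressions` means a global aggregation, i.e. an `agg` with no `groupBy`. A minimal sketch of such a query, assuming Spark 2.2-era APIs; the session setup, the `MemoryStream` input, and all names here are illustrative, not taken from this PR:

import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.functions.count

object GlobalAggregationSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("global-agg-sketch")
      .getOrCreate()
    import spark.implicits._
    implicit val sqlContext: SQLContext = spark.sqlContext

    // No groupBy: the stateful operators see empty keyExpressions, so this
    // is a global aggregation backed by a single state store partition.
    val input = MemoryStream[Int]
    val globalCount = input.toDS().toDF("value").agg(count("*"))

    val query = globalCount.writeStream
      .outputMode("complete")
      .format("memory")
      .queryName("global_count")
      .start()

    input.addData(1, 2, 3)
    query.processAllAvailable()
    // A trigger that delivers no rows to this operator still runs the restore:
    // without the branch above, HashAggregateExec's zero-valued partial merge
    // would overwrite the saved count in the state store.
    spark.table("global_count").show()

    query.stop()
    spark.stop()
  }
}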
@@ -351,6 +368,14 @@ case class StateStoreSaveExec( | |
| override def output: Seq[Attribute] = child.output | ||
|
|
||
| override def outputPartitioning: Partitioning = child.outputPartitioning | ||
|
|
||
| override def requiredChildDistribution: Seq[Distribution] = { | ||
| if (keyExpressions.isEmpty) { | ||
| AllTuples :: Nil | ||
| } else { | ||
| ClusteredDistribution(keyExpressions) :: Nil | ||
| } | ||
| } | ||
| } | ||
|
|
||
| /** Physical operator for executing streaming Deduplicate. */ | ||
|
|
||
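To see what the two distributions demand of the child, here is a hedged sketch against the Catalyst partitioning model as it stood in Spark 2.2 (`DistributionSketch` and the `key` column are hypothetical): `AllTuples` is only satisfiable by a single-partition layout, so a multi-partition child must be exchanged, whereas hash-partitioning on the grouping keys satisfies `ClusteredDistribution`.

import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.types.IntegerType

object DistributionSketch {
  def main(args: Array[String]): Unit = {
    // A hypothetical grouping column, standing in for keyExpressions.head.
    val key = AttributeReference("key", IntegerType)()

    // AllTuples needs every row in one partition: SinglePartition passes,
    // a 200-partition hash layout does not, so an Exchange is required.
    assert(SinglePartition.satisfies(AllTuples))
    assert(!HashPartitioning(Seq(key), 200).satisfies(AllTuples))

    // Hash-partitioning on the grouping keys satisfies ClusteredDistribution,
    // so keyed stateful aggregations keep the ordinary shuffle behavior.
    assert(HashPartitioning(Seq(key), 200).satisfies(ClusteredDistribution(Seq(key))))

    println("all distribution checks passed")
  }
}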
New file: EnsureStatefulOpPartitioningSuite.scala
@@ -0,0 +1,132 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.streaming

import java.util.UUID

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{SparkPlan, SparkPlanTest, UnaryExecNode}
import org.apache.spark.sql.execution.exchange.{Exchange, ShuffleExchange}
import org.apache.spark.sql.execution.streaming.{IncrementalExecution, OffsetSeqMetadata, StatefulOperator, StatefulOperatorStateInfo}
import org.apache.spark.sql.test.SharedSQLContext

class EnsureStatefulOpPartitioningSuite extends SparkPlanTest with SharedSQLContext {

  import testImplicits._
  // Initialize the shared SparkSession eagerly so that `baseDf` below can be
  // built while the suite is being constructed.
  super.beforeAll()

  private val baseDf = Seq((1, "A"), (2, "b")).toDF("num", "char")
  testEnsureStatefulOpPartitioning(
    "ClusteredDistribution generates Exchange with HashPartitioning",
    baseDf.queryExecution.sparkPlan,
    requiredDistribution = keys => ClusteredDistribution(keys),
    expectedPartitioning =
      keys => HashPartitioning(keys, spark.sessionState.conf.numShufflePartitions),
    expectShuffle = true)

  testEnsureStatefulOpPartitioning(
    "ClusteredDistribution with coalesce(1) generates Exchange with HashPartitioning",
    baseDf.coalesce(1).queryExecution.sparkPlan,
    requiredDistribution = keys => ClusteredDistribution(keys),
    expectedPartitioning =
      keys => HashPartitioning(keys, spark.sessionState.conf.numShufflePartitions),
    expectShuffle = true)

  testEnsureStatefulOpPartitioning(
    "AllTuples generates Exchange with SinglePartition",
    baseDf.queryExecution.sparkPlan,
    requiredDistribution = _ => AllTuples,
    expectedPartitioning = _ => SinglePartition,
    expectShuffle = true)

  testEnsureStatefulOpPartitioning(
    "AllTuples with coalesce(1) doesn't need Exchange",
    baseDf.coalesce(1).queryExecution.sparkPlan,
    requiredDistribution = _ => AllTuples,
    expectedPartitioning = _ => SinglePartition,
    expectShuffle = false)
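The "AllTuples with coalesce(1)" case works because a one-partition coalesce reports `SinglePartition` as its output partitioning, which already satisfies `AllTuples`. A standalone sketch of that behavior, assuming a local Spark 2.2-style session (the setup names are illustrative):

import org.apache.spark.sql.SparkSession

object CoalescePartitioningSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("coalesce-sketch")
      .getOrCreate()

    // coalesce(1) produces a plan whose outputPartitioning is SinglePartition,
    // so a requirement of AllTuples is met without inserting an Exchange.
    val plan = spark.range(10).coalesce(1).queryExecution.sparkPlan
    println(plan.outputPartitioning) // expected: SinglePartition

    spark.stop()
  }
}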
  /**
   * For a `StatefulOperator` with the given `requiredChildDistribution` and child SparkPlan
   * `inputPlan`, verifies that the incremental planner adds an Exchange when required to
   * obtain the expected partitioning.
   */
  private def testEnsureStatefulOpPartitioning(
      testName: String,
      inputPlan: SparkPlan,
      requiredDistribution: Seq[Attribute] => Distribution,
      expectedPartitioning: Seq[Attribute] => Partitioning,
      expectShuffle: Boolean): Unit = {
    test(testName) {
      val operator = TestStatefulOperator(inputPlan, requiredDistribution(inputPlan.output.take(1)))
      val executed = executePlan(operator, OutputMode.Complete())
      if (expectShuffle) {
        val exchange = executed.children.find(_.isInstanceOf[Exchange])
        if (exchange.isEmpty) {
          fail(s"Was expecting an exchange but didn't get one in:\n$executed")
        }
        assert(exchange.get ===
          ShuffleExchange(expectedPartitioning(inputPlan.output.take(1)), inputPlan),
          s"Exchange didn't have expected properties:\n${exchange.get}")
      } else {
        assert(!executed.children.exists(_.isInstanceOf[Exchange]),
          s"Unexpected exchange found in:\n$executed")
      }
    }
  }
  /** Executes a SparkPlan using the IncrementalPlanner used for Structured Streaming. */
  private def executePlan(
      p: SparkPlan,
      outputMode: OutputMode = OutputMode.Append()): SparkPlan = {
    val execution = new IncrementalExecution(
      spark,
      null,
      OutputMode.Complete(),
      "chk",
      UUID.randomUUID(),
      0L,
      OffsetSeqMetadata()) {
      // The test plans are built with unresolved attributes; resolve each one
      // against the output of the node's children before planning.
      override lazy val sparkPlan: SparkPlan = p transform {
        case plan: SparkPlan =>
          val inputMap = plan.children.flatMap(_.output).map(a => (a.name, a)).toMap
          plan transformExpressions {
            case UnresolvedAttribute(Seq(u)) =>
              inputMap.getOrElse(u,
                sys.error(s"Invalid Test: Cannot resolve $u given input $inputMap"))
          }
      }
    }
    execution.executedPlan
  }
}
/** Used to emulate a `StatefulOperator` with the given requiredDistribution. */
case class TestStatefulOperator(
    child: SparkPlan,
    requiredDist: Distribution) extends UnaryExecNode with StatefulOperator {
  override def output: Seq[Attribute] = child.output
  override def doExecute(): RDD[InternalRow] = child.execute()
  override def requiredChildDistribution: Seq[Distribution] = requiredDist :: Nil
  override def stateInfo: Option[StatefulOperatorStateInfo] = None
}

Author (on TestStatefulOperator): this has to live outside the test suite, otherwise we get weird failed to …
Contributor: +1 good catch