Commit 331b46d

Davies Liu committed:

remove metrics, because they are very slow
1 parent f0f3da6 commit 331b46d

File tree

3 files changed (+5, -30 lines)
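
The commit message gives the reason only briefly: updating metrics on every row is slow in the tight loops that whole-stage codegen produces. Below is a minimal, hypothetical micro-benchmark sketching that per-row overhead; the LongMetric class is a stand-in for Spark's LongSQLMetric, and none of this code is from the commit itself.

// Hypothetical micro-benchmark: the same summation loop with and without a
// per-row metric.add(1) call, mimicking the increments this commit removes.
object MetricOverheadSketch {
  // Stand-in for LongSQLMetric: a mutable long counter with an add() method.
  final class LongMetric {
    private var v = 0L
    def add(n: Long): Unit = v += n
    def value: Long = v
  }

  // Run a body, print its wall-clock time and result.
  private def time(label: String)(body: => Long): Unit = {
    val start = System.nanoTime()
    val result = body
    println(f"$label%-28s ${(System.nanoTime() - start) / 1e6}%.1f ms (result=$result)")
  }

  def main(args: Array[String]): Unit = {
    val n = 100000000
    val metric = new LongMetric

    time("plain loop") {
      var sum = 0L; var i = 0
      while (i < n) { sum += i; i += 1 }
      sum
    }

    time("loop with metric.add(1)") {
      var sum = 0L; var i = 0
      while (i < n) { sum += i; metric.add(1); i += 1 }
      sum
    }

    println(s"metric = ${metric.value}") // keep the metric live for the JIT
  }
}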

sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala (2 additions & 15 deletions)

@@ -22,10 +22,9 @@ import scala.collection.mutable.ArrayBuffer
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.SQLContext
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{Attribute, BoundReference, Expression, LeafExpression}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, BoundReference, LeafExpression}
 import org.apache.spark.sql.catalyst.expressions.codegen._
 import org.apache.spark.sql.catalyst.rules.Rule
-import org.apache.spark.sql.execution.metric.LongSQLMetric
 
 /**
  * An interface for those physical operators that support codegen.
@@ -91,21 +90,9 @@ trait CodegenSupport extends SparkPlan {
    * }
    */
   def doConsume(ctx: CodegenContext, child: SparkPlan, input: Seq[ExprCode]): String
-
-  /**
-   * Return a term for a LongSQLMetric specified by given name.
-   */
-  protected def termForLongMetric(ctx: CodegenContext, name: String): String = {
-    val metric = longMetric(name)
-    val idx = ctx.references.length
-    ctx.references += metric
-    val term = ctx.freshName(name)
-    val clsName = classOf[LongSQLMetric].getName
-    ctx.addMutableState(clsName, term, s"$term = ($clsName) references[$idx];")
-    term
-  }
 }
 
+
 /**
  * InputAdapter is used to hide a SparkPlan from a subtree that support codegen.
  *
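
For context, the deleted termForLongMetric helper relied on a general codegen pattern: to let generated Java code touch a JVM object, append the object to ctx.references and emit a mutable field initialized by casting references[idx] back to the concrete class. The following is a simplified, self-contained sketch of that pattern; CodegenContextSketch and termForObject are hypothetical names, not Spark's API.

import scala.collection.mutable.ArrayBuffer

object ReferencesPatternSketch {
  // Simplified model of the codegen-context machinery the removed helper used.
  class CodegenContextSketch {
    val references = ArrayBuffer.empty[Any]        // objects handed to the generated class
    val mutableStates = ArrayBuffer.empty[String]  // field declarations plus init statements
    private var counter = 0
    def freshName(prefix: String): String = { counter += 1; s"$prefix$counter" }
    def addMutableState(clsName: String, name: String, init: String): Unit =
      mutableStates += s"private $clsName $name;  // init: $init"
  }

  // Same shape as the removed termForLongMetric, generalized to any object:
  // register the object, then return the generated field name that refers to it.
  def termForObject(ctx: CodegenContextSketch, obj: AnyRef, name: String, clsName: String): String = {
    val idx = ctx.references.length  // slot the object will occupy in references
    ctx.references += obj
    val term = ctx.freshName(name)   // unique field name in the generated code
    ctx.addMutableState(clsName, term, s"$term = ($clsName) references[$idx];")
    term
  }

  def main(args: Array[String]): Unit = {
    val ctx = new CodegenContextSketch
    val term = termForObject(ctx, new Object, "numRows", "org.example.LongMetric")
    println(term)                       // e.g. numRows1
    ctx.mutableStates.foreach(println)  // the field and cast the generated class would contain
  }
}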

sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala (1 addition & 9 deletions)

@@ -24,7 +24,7 @@ import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode, ExpressionCanonicalizer}
 import org.apache.spark.sql.catalyst.plans.physical._
-import org.apache.spark.sql.execution.metric.{LongSQLMetric, SQLMetrics}
+import org.apache.spark.sql.execution.metric.SQLMetrics
 import org.apache.spark.sql.types.LongType
 import org.apache.spark.util.MutablePair
 import org.apache.spark.util.random.PoissonSampler
@@ -42,14 +42,11 @@ case class Project(projectList: Seq[NamedExpression], child: SparkPlan)
   }
 
   override def doConsume(ctx: CodegenContext, child: SparkPlan, input: Seq[ExprCode]): String = {
-    val numRows = termForLongMetric(ctx, "numRows")
     val exprs = projectList.map(x =>
       ExpressionCanonicalizer.execute(BindReferences.bindReference(x, child.output)))
     ctx.currentVars = input
     val output = exprs.map(_.gen(ctx))
     s"""
-      | // Projection
-      | $numRows.add(1);
       | ${output.map(_.code).mkString("\n")}
      |
       | ${consume(ctx, output)}
@@ -84,18 +81,13 @@ case class Filter(condition: Expression, child: SparkPlan) extends UnaryNode wit
   }
 
   override def doConsume(ctx: CodegenContext, child: SparkPlan, input: Seq[ExprCode]): String = {
-    val numInputTerm = termForLongMetric(ctx, "numInputRows")
-    val numOutputTerm = termForLongMetric(ctx, "numOutputRows")
     val expr = ExpressionCanonicalizer.execute(
       BindReferences.bindReference(condition, child.output))
     ctx.currentVars = input
     val eval = expr.gen(ctx)
     s"""
-      | // Filter
-      | $numInputTerm.add(1);
       | ${eval.code}
       | if (!${eval.isNull} && ${eval.value}) {
-      |   $numOutputTerm.add(1);
       | ${consume(ctx, ctx.currentVars)}
       | }
     """.stripMargin

sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala (2 additions & 6 deletions)

@@ -135,13 +135,9 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
   test("WholeStageCodegen metrics") {
     // Assume the execution plan is
     // WholeStageCodegen(nodeId = 0, Range(nodeId = 2) -> Filter(nodeId = 1))
+    // TODO: update metrics in generated operators
     val df = sqlContext.range(10).filter('id < 5)
-    testSparkPlanMetrics(df, 1, Map(
-      1L -> ("Filter", Map(
-        "number of input rows" -> 10L,
-        "number of output rows" -> 5L
-      )))
-    )
+    testSparkPlanMetrics(df, 1, Map.empty)
   }
 
   test("TungstenAggregate metrics") {
