Commit c8792de (parent: dda6752)

Remove some debug logging

2 files changed: +3, -13 lines

sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala

Lines changed: 3 additions & 12 deletions
@@ -17,6 +17,9 @@
 
 package org.apache.spark.sql.execution
 
+import scala.util.control.NonFatal
+
+import org.apache.spark.{HashPartitioner, Partitioner, RangePartitioner, SparkEnv}
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.rdd.{RDD, ShuffledRDD}
 import org.apache.spark.serializer.Serializer
@@ -32,8 +35,6 @@ import org.apache.spark.sql.types.DataType
 import org.apache.spark.util.MutablePair
 import org.apache.spark.{HashPartitioner, Partitioner, RangePartitioner, SparkEnv}
 
-import scala.util.control.NonFatal
-
 object Exchange {
   /**
    * Returns true when the ordering expressions are a subset of the key.
@@ -194,7 +195,6 @@ case class Exchange(
         }
         val shuffled = new ShuffledRDD[InternalRow, InternalRow, InternalRow](rdd, part)
         if (newOrdering.nonEmpty) {
-          println("Shuffling with a key ordering")
           shuffled.setKeyOrdering(keyOrdering)
         }
         shuffled.setSerializer(serializer)
@@ -308,7 +308,6 @@ private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[
         partitioning: Partitioning,
         rowOrdering: Seq[SortOrder],
         child: SparkPlan): SparkPlan = {
-      logInfo("In addOperatorsIfNecessary")
       val needSort = rowOrdering.nonEmpty && child.outputOrdering != rowOrdering
       val needsShuffle = child.outputPartitioning != partitioning
 
@@ -328,13 +327,9 @@ private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[
           case NonFatal(e) =>
             false
         }
-        logInfo(s"For row with data types ${withShuffle.schema.map(_.dataType)}, " +
-          s"supportsUnsafeRowConversion = $supportsUnsafeRowConversion")
         if (sqlContext.conf.unsafeEnabled && supportsUnsafeRowConversion) {
-          logInfo("Using unsafe external sort!")
           UnsafeExternalSort(rowOrdering, global = false, withShuffle)
         } else if (sqlContext.conf.externalSortEnabled) {
-          logInfo("Not using unsafe sort")
           ExternalSort(rowOrdering, global = false, withShuffle)
         } else {
           Sort(rowOrdering, global = false, withShuffle)
@@ -352,7 +347,6 @@ private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[
       if (meetsRequirements && compatible && !needsAnySort) {
         operator
       } else {
-        logInfo("Looking through Exchange")
         // At least one child does not satisfies its required data distribution or
         // at least one child's outputPartitioning is not compatible with another child's
         // outputPartitioning. In this case, we need to add Exchange operators.
@@ -379,10 +373,7 @@ private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[
           case NonFatal(e) =>
             false
         }
-        logInfo(s"For row with data types ${child.schema.map(_.dataType)}, " +
-          s"supportsUnsafeRowConversion = $supportsUnsafeRowConversion")
         if (sqlContext.conf.unsafeEnabled && supportsUnsafeRowConversion) {
-          logInfo("Using unsafe external sort!")
           UnsafeExternalSort(rowOrdering, global = false, child)
         } else if (sqlContext.conf.externalSortEnabled) {
           ExternalSort(rowOrdering, global = false, child)
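
For context, the two EnsureRequirements hunks above delete logging from the same three-way choice of sort operator, which after this commit reads: use UnsafeExternalSort when the unsafe path is enabled and the trial UnsafeRow conversion succeeds, fall back to ExternalSort when external sorting is enabled, and otherwise use the in-memory Sort. Below is a minimal, self-contained sketch of that decision; SortSelectionSketch, Plan, chooseSort, and the bare boolean flags are simplified stand-ins for illustration, not Spark's actual classes or conf keys.

object SortSelectionSketch {
  sealed trait Plan
  case class UnsafeExternalSort(child: String) extends Plan
  case class ExternalSort(child: String) extends Plan
  case class Sort(child: String) extends Plan

  // Mirrors the if / else-if / else chain left behind once the logInfo
  // calls are removed (hypothetical flags, not Spark's SQLConf).
  def chooseSort(
      unsafeEnabled: Boolean,
      supportsUnsafeRowConversion: Boolean,
      externalSortEnabled: Boolean,
      child: String): Plan = {
    if (unsafeEnabled && supportsUnsafeRowConversion) {
      UnsafeExternalSort(child) // binary UnsafeRow sort path
    } else if (externalSortEnabled) {
      ExternalSort(child) // spillable sort over regular rows
    } else {
      Sort(child) // simple in-memory sort
    }
  }

  def main(args: Array[String]): Unit = {
    // The unsafe path is taken only when both flags allow it:
    println(chooseSort(unsafeEnabled = true, supportsUnsafeRowConversion = true,
      externalSortEnabled = true, child = "shuffled")) // UnsafeExternalSort(shuffled)
    println(chooseSort(unsafeEnabled = true, supportsUnsafeRowConversion = false,
      externalSortEnabled = true, child = "shuffled")) // ExternalSort(shuffled)
  }
}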

sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala

Lines changed: 0 additions & 1 deletion
@@ -337,7 +337,6 @@ case class UnsafeExternalSort(
           if (hasNext) {
             row.pointTo(
               sortedIterator.getBaseObject, sortedIterator.getBaseOffset, numFields, schema)
-            println("Returned row " + row)
             row
           } else {
             val rowDataCopy = new Array[Byte](sortedIterator.getRecordLength)
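
The println removed above logged every row returned by the sorted iterator, and the surrounding code re-points one mutable row (row.pointTo(...)) at each record rather than allocating a row per element. A minimal sketch of that reuse pattern, assuming a hypothetical MutableRecord in place of Spark's UnsafeRow:

object ReusedRowSketch {
  // MutableRecord stands in for UnsafeRow: pointTo re-targets the same
  // object at new backing data instead of allocating a fresh row.
  class MutableRecord {
    private var backing: Array[Int] = Array.empty
    def pointTo(data: Array[Int]): Unit = backing = data
    override def toString: String = backing.mkString("[", ", ", "]")
  }

  // Returns the SAME record instance from every next() call, re-pointed
  // each time, so callers must consume or copy it before advancing.
  def recordIterator(rows: Array[Array[Int]]): Iterator[MutableRecord] = {
    val record = new MutableRecord
    var i = 0
    new Iterator[MutableRecord] {
      def hasNext: Boolean = i < rows.length
      def next(): MutableRecord = {
        record.pointTo(rows(i)) // reuse: no per-row allocation
        i += 1
        record
      }
    }
  }

  def main(args: Array[String]): Unit = {
    recordIterator(Array(Array(1, 2), Array(3, 4))).foreach(println)
    // prints [1, 2] then [3, 4]
  }
}

A debug println inside next() observes each row in flight, but it also fires once per row of output, consistent with this commit stripping it back out.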
