Commit 1d413ce

fixed checkstyle issues

1 parent 9ee94ee commit 1d413ce

2 files changed: +11 -8 lines changed


core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala

Lines changed: 4 additions & 4 deletions
@@ -266,10 +266,10 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
     }
 
     val combOp = (r1: Result, r2: Result) => {
-      //take union of both key sets in case one partion doesn't contain all keys
+      // take union of both key sets in case one partion doesn't contain all keys
       val keyUnion = r1.resultMap.keys.toSet.union(r2.resultMap.keys.toSet)
 
-      //Use r2 to keep the combined result since r1 is usual empty
+      // Use r2 to keep the combined result since r1 is usual empty
       for (key <- keyUnion) {
         val entry1 = r1.resultMap.get(key)
         val entry2 = r2.resultMap.get(key)
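
A note on the merge pattern in this hunk: taking the union of both key sets before combining handles the case where one partial result never saw a given key. Below is a minimal, self-contained Scala sketch of that pattern; Stratum is reduced to a hypothetical per-key count, and only the key-union logic mirrors the diff.

object MergeByKeyUnion {
  // Hypothetical, simplified stand-in for Spark's per-key sampling state:
  // just a count of the items seen for a key.
  final case class Stratum(count: Long)

  def merge(m1: Map[String, Stratum], m2: Map[String, Stratum]): Map[String, Stratum] = {
    // take the union of both key sets in case one partial result lacks some keys
    val keyUnion = m1.keys.toSet.union(m2.keys.toSet)
    keyUnion.map { key =>
      (m1.get(key), m2.get(key)) match {
        case (Some(a), Some(b)) => key -> Stratum(a.count + b.count) // both sides saw the key
        case (Some(a), None)    => key -> a                          // only the left side did
        case (None, Some(b))    => key -> b                          // only the right side did
        case (None, None)       => sys.error("unreachable: key came from the union")
      }
    }.toMap
  }

  def main(args: Array[String]): Unit = {
    val r1 = Map("a" -> Stratum(3))
    val r2 = Map("a" -> Stratum(2), "b" -> Stratum(5))
    println(merge(r1, r2)) // Map(a -> Stratum(5), b -> Stratum(5))
  }
}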
@@ -286,7 +286,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
 
     val zeroU = new Result(Map[K, Stratum]())
 
-    //determine threshold for each stratum and resample
+    // determine threshold for each stratum and resample
     val finalResult = self.aggregateWithContext(zeroU)(seqOp, combOp).resultMap
     val thresholdByKey = new mutable.HashMap[K, Double]()
     for ((key, stratum) <- finalResult) {
@@ -330,7 +330,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
     // Bernoulli sampler
     self.mapPartitionsWithIndex((idx: Int, iter: Iterator[(K, V)]) => {
       val random = new RandomDataGenerator
-      random.reSeed(seed+idx)
+      random.reSeed(seed + idx)
       iter.filter(t => random.nextUniform(0.0, 1.0) < thresholdByKey.get(t._1).get)
     }, preservesPartitioning = true)
   }
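
The last hunk above reseeds the generator with seed + idx, so each partition draws its own reproducible stream. A standalone sketch of that pattern, using java.util.Random in place of commons-math's RandomDataGenerator and made-up thresholds:

import java.util.Random

object BernoulliByPartition {
  // One partition's worth of the Bernoulli sampler in the last hunk:
  // reseed deterministically from (seed, partition index), then keep a
  // pair when a uniform draw falls below its key's threshold.
  def samplePartition(
      idx: Int,
      iter: Iterator[(String, Int)],
      seed: Long,
      thresholdByKey: Map[String, Double]): Iterator[(String, Int)] = {
    val random = new Random(seed + idx) // plays the role of random.reSeed(seed + idx)
    iter.filter(t => random.nextDouble() < thresholdByKey(t._1))
  }

  def main(args: Array[String]): Unit = {
    val data = Iterator("a" -> 1, "a" -> 2, "b" -> 3, "b" -> 4)
    val kept = samplePartition(0, data, 42L, Map("a" -> 0.5, "b" -> 1.0))
    println(kept.toList) // "b" pairs always survive; each "a" pair survives with p = 0.5
  }
}

Keeping the seed a deterministic function of the partition index is what makes the sampled RDD recomputable: if a partition is lost and rebuilt from lineage, the filter draws the same sequence and keeps the same elements.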

core/src/main/scala/org/apache/spark/rdd/RDD.scala

Lines changed: 7 additions & 4 deletions
@@ -884,15 +884,18 @@ abstract class RDD[T: ClassTag](
    * A version of {@link #aggregate()} that passes the TaskContext to the function that does
    * aggregation for each partition.
    */
-  def aggregateWithContext[U: ClassTag](zeroValue: U)(seqOp: ((TaskContext, U), T) => U, combOp: (U, U) => U): U = {
+  def aggregateWithContext[U: ClassTag](zeroValue: U)(seqOp: ((TaskContext, U), T) => U,
+      combOp: (U, U) => U): U = {
     // Clone the zero value since we will also be serializing it as part of tasks
     var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
-    //pad seqOp and combOp with taskContext to conform to aggregate's signature in TraversableOnce
+    // pad seqOp and combOp with taskContext to conform to aggregate's signature in TraversableOnce
     val paddedSeqOp = (arg1: (TaskContext, U), item: T) => (arg1._1, seqOp(arg1, item))
-    val paddedcombOp = (arg1 : (TaskContext, U), arg2: (TaskContext, U)) => (arg1._1, combOp(arg1._2, arg1._2))
+    val paddedcombOp = (arg1 : (TaskContext, U), arg2: (TaskContext, U)) =>
+      (arg1._1, combOp(arg1._2, arg1._2))
     val cleanSeqOp = sc.clean(paddedSeqOp)
     val cleanCombOp = sc.clean(paddedcombOp)
-    val aggregatePartition = (tc: TaskContext, it: Iterator[T]) => (it.aggregate(tc, zeroValue)(cleanSeqOp, cleanCombOp))._2
+    val aggregatePartition = (tc: TaskContext, it: Iterator[T]) =>
+      (it.aggregate(tc, zeroValue)(cleanSeqOp, cleanCombOp))._2
     val mergeResult = (index: Int, taskResult: U) => jobResult = combOp(jobResult, taskResult)
     sc.runJob(this, aggregatePartition, mergeResult)
     jobResult
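
The RDD.scala hunk only rewraps long lines, but the trick it touches deserves a gloss: Iterator.aggregate threads a single accumulator type through a partition, so the TaskContext is smuggled along as the first half of a (context, accumulator) pair, padded into both operators, and peeled off at the end with ._2. Below is a local sketch of that padding under simplified assumptions, with a String standing in for TaskContext (note the padded combiner in this sketch merges the accumulators from both arguments):

object PaddedAggregate {
  // Iterator.aggregate accepts one accumulator type, so the context rides along
  // as the first element of a (context, acc) pair and is dropped at the end.
  // String is an illustrative stand-in for Spark's TaskContext.
  def aggregateWithContext[T, U](ctx: String, it: Iterator[T], zeroValue: U)(
      seqOp: ((String, U), T) => U, combOp: (U, U) => U): U = {
    // pad both operators to work on (context, accumulator) pairs
    val paddedSeqOp = (arg1: (String, U), item: T) => (arg1._1, seqOp(arg1, item))
    val paddedCombOp = (arg1: (String, U), arg2: (String, U)) =>
      (arg1._1, combOp(arg1._2, arg2._2)) // merge the accumulators from both sides
    it.aggregate((ctx, zeroValue))(paddedSeqOp, paddedCombOp)._2
  }

  def main(args: Array[String]): Unit = {
    val sum = aggregateWithContext("partition-0", Iterator(1, 2, 3), 0)(
      (ctxAndAcc, x) => ctxAndAcc._2 + x,
      _ + _)
    println(sum) // 6
  }
}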
