diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/PoolPage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/PoolPage.scala
index 0a2bf31833d2..bcc9f70452c5 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/PoolPage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/PoolPage.scala
@@ -40,6 +40,7 @@ private[ui] class PoolPage(parent: JobProgressTab) extends WebUIPage("pool") {
       case Some(s) => s.values.toSeq
       case None => Seq[StageInfo]()
     }
+    val activeStagesTable =
       new StageTableBase(activeStages.sortBy(_.submissionTime).reverse, parent)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
index 683752ac9624..b99036ea4796 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
@@ -84,10 +84,10 @@ object TwitterAlgebirdCMS {
       if (rdd.count() != 0) {
         val partial = rdd.first()
         val partialTopK = partial.heavyHitters.map(id =>
-          (id, partial.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.slice(0, TOPK)
+          (id, partial.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.take(TOPK)
         globalCMS ++= partial
         val globalTopK = globalCMS.heavyHitters.map(id =>
-          (id, globalCMS.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.slice(0, TOPK)
+          (id, globalCMS.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.take(TOPK)
         println("Approx heavy hitters at %2.2f%% threshold this batch: %s".format(PERC,
           partialTopK.mkString("[", ",", "]")))
         println("Approx heavy hitters at %2.2f%% threshold overall: %s".format(PERC,
@@ -102,7 +102,7 @@ object TwitterAlgebirdCMS {
         {case (id, count) => (count, id)})
         .sortByKey(ascending = false).take(TOPK)
       globalExact = mm.plus(globalExact.toMap, partialMap)
-      val globalTopK = globalExact.toSeq.sortBy(_._2).reverse.slice(0, TOPK)
+      val globalTopK = globalExact.toSeq.sortBy(_._2).reverse.take(TOPK)
       println("Exact heavy hitters this batch: %s".format(partialTopK.mkString("[", ",", "]")))
       println("Exact heavy hitters overall: %s".format(globalTopK.mkString("[", ",", "]")))
     }
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
index 54854252d747..fd209169bda3 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
@@ -155,7 +155,7 @@ abstract class GeneralizedLinearAlgorithm[M <: GeneralizedLinearModel]
     val intercept = if (addIntercept) weightsWithIntercept(weightsWithIntercept.size - 1) else 0.0
     val weights =
       if (addIntercept) {
-        Vectors.dense(weightsWithIntercept.toArray.slice(0, weightsWithIntercept.size - 1))
+        Vectors.dense(weightsWithIntercept.toArray.init)
       } else {
         weightsWithIntercept
       }
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
index b76fbe89c368..1b5a50017958 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
@@ -86,7 +86,7 @@ object MFDataGenerator {
     val mn = m * n
     val shuffled = rand.shuffle(1 to mn toList)

-    val omega = shuffled.slice(0, sampSize)
+    val omega = shuffled.take(sampSize)
     val ordered = omega.sortWith(_ < _).toArray
     val trainData: RDD[(Int, Int, Double)] = sc.parallelize(ordered)
       .map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1)))
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
index 4bb022cf238a..49d2d55c5fcc 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
@@ -188,7 +188,7 @@ case class RangePartitioning(ordering: Seq[SortOrder], numPartitions: Int)
   override def satisfies(required: Distribution): Boolean = required match {
     case UnspecifiedDistribution => true
     case OrderedDistribution(requiredOrdering) =>
-      val minSize = Seq(requiredOrdering.size, ordering.size).min
+      val minSize = math.min(requiredOrdering.size, ordering.size)
       requiredOrdering.take(minSize) == ordering.take(minSize)
     case ClusteredDistribution(requiredClustering) =>
       clusteringSet.subsetOf(requiredClustering.toSet)
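
Note on the mechanical rewrites: for any Scala sequence xs and n >= 0, xs.slice(0, n) and xs.take(n) return the same prefix (both clamp n to the length), and Seq(a, b).min gives the same result as math.min(a, b) without allocating a temporary Seq. A quick REPL sketch with throwaway values, just to illustrate the equivalences:

    val xs = List(10, 20, 30)
    xs.slice(0, 2) == xs.take(2)      // true
    xs.slice(0, 5) == xs.take(5)      // true: both clamp to the list length
    Seq(2, 7).min == math.min(2, 7)   // true: math.min skips the temporary Seq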
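
The GeneralizedLinearAlgorithm hunk needs more than a mechanical swap: slice(0, weightsWithIntercept.size - 1) keeps every element except the last, and the intercept lives in the last slot (see the intercept line in the same hunk), so the matching collection method is init (drop the last element), not tail (drop the first); the hunk above uses init accordingly. A minimal sketch of the distinction, using an illustrative weight array rather than real model output:

    // Hypothetical weights; the intercept is the final element by convention here.
    val weightsWithIntercept = Array(0.5, 1.5, 2.5, 3.0)
    weightsWithIntercept.slice(0, weightsWithIntercept.size - 1)  // Array(0.5, 1.5, 2.5)
    weightsWithIntercept.init                                     // Array(0.5, 1.5, 2.5) -- same elements
    weightsWithIntercept.tail                                     // Array(1.5, 2.5, 3.0) -- drops the wrong end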