Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
Original file line number Diff line number Diff line change
Expand Up @@ -908,6 +908,20 @@ class DataFrame private[sql](
schema, needsConversion = false)
}

/**
 * Returns a new [[DataFrame]] that has exactly `numPartitions` partitions.
 * Similar to coalesce defined on an [[RDD]], this operation results in a narrow dependency, e.g.
 * if you go from 1000 partitions to 100 partitions, there will not be a shuffle, instead each of
 * the 100 new partitions will claim 10 of the current partitions.
 * @group rdd
 */
override def coalesce(numPartitions: Int): DataFrame = {
  // Coalesce the underlying row RDD without a shuffle, then rewrap it with the
  // existing schema. The rows are already in internal form, so no conversion is needed.
  val coalescedRows = queryExecution.toRdd.coalesce(numPartitions)
  sqlContext.createDataFrame(coalescedRows, schema, needsConversion = false)
}

/**
* Returns a new [[DataFrame]] that contains only the unique rows from this [[DataFrame]].
* @group dfops
Expand Down
2 changes: 2 additions & 0 deletions sql/core/src/main/scala/org/apache/spark/sql/RDDApi.scala
Original file line number Diff line number Diff line change
Expand Up @@ -61,5 +61,7 @@ private[sql] trait RDDApi[T] {

/**
 * Returns a new [[DataFrame]] with exactly `numPartitions` partitions.
 * NOTE(review): presumably implemented via a shuffle repartition of the underlying
 * RDD — confirm against the concrete implementation.
 */
def repartition(numPartitions: Int): DataFrame

/**
 * Returns a new [[DataFrame]] with exactly `numPartitions` partitions.
 * Like `RDD.coalesce`, this results in a narrow dependency (no shuffle when
 * reducing the partition count).
 */
def coalesce(numPartitions: Int): DataFrame

/** Returns a new [[DataFrame]] containing only the unique rows of this one. */
def distinct: DataFrame
}
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,14 @@ class DataFrameSuite extends QueryTest {
testData.select('key).collect().toSeq)
}

test("coalesce") {
  // Coalescing down to a single partition must actually produce one partition,
  // and must not change the data.
  val singlePartition = testData.select('key).coalesce(1)
  assert(singlePartition.rdd.partitions.size === 1)

  checkAnswer(
    singlePartition.select('key),
    testData.select('key).collect().toSeq)
}

test("groupBy") {
checkAnswer(
testData2.groupBy("a").agg($"a", sum($"b")),
Expand Down