Skip to content

Commit a38e23c

Browse files
committed
[SQL] Make dataframe more tolerant of being serialized
Eases use in the spark-shell.

Author: Michael Armbrust <[email protected]>

Closes #4545 from marmbrus/serialization and squashes the following commits:

04748e6 [Michael Armbrust] @scala.annotation.varargs
b36e219 [Michael Armbrust] moreFixes
1 parent: d931b01 · commit: a38e23c

File tree

4 files changed

+15
-4
lines changed

4 files changed

+15
-4
lines changed

sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ private[sql] object DataFrame {
7676
*/
7777
// TODO: Improve documentation.
7878
@Experimental
79-
trait DataFrame extends RDDApi[Row] {
79+
trait DataFrame extends RDDApi[Row] with Serializable {
8080

8181
val sqlContext: SQLContext
8282

sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,8 +44,8 @@ import org.apache.spark.sql.types.{NumericType, StructType}
4444
* Internal implementation of [[DataFrame]]. Users of the API should use [[DataFrame]] directly.
4545
*/
4646
private[sql] class DataFrameImpl protected[sql](
47-
override val sqlContext: SQLContext,
48-
val queryExecution: SQLContext#QueryExecution)
47+
@transient override val sqlContext: SQLContext,
48+
@transient val queryExecution: SQLContext#QueryExecution)
4949
extends DataFrame {
5050

5151
/**

sql/core/src/main/scala/org/apache/spark/sql/Dsl.scala

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -166,6 +166,15 @@ object Dsl {
166166
//////////////////////////////////////////////////////////////////////////////////////////////
167167
//////////////////////////////////////////////////////////////////////////////////////////////
168168

169+
/**
170+
* Returns the first column that is not null.
171+
* {{{
172+
* df.select(coalesce(df("a"), df("b")))
173+
* }}}
174+
*/
175+
@scala.annotation.varargs
176+
def coalesce(e: Column*): Column = Coalesce(e.map(_.expr))
177+
169178
/**
170179
* Unary minus, i.e. negate the expression.
171180
* {{{

sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -129,9 +129,11 @@ class SQLContext(@transient val sparkContext: SparkContext)
129129
* A collection of methods that are considered experimental, but can be used to hook into
130130
* the query planner for advanced functionalities.
131131
*/
132+
@transient
132133
val experimental: ExperimentalMethods = new ExperimentalMethods(this)
133134

134135
/** Returns a [[DataFrame]] with no rows or columns. */
136+
@transient
135137
lazy val emptyDataFrame = DataFrame(this, NoRelation)
136138

137139
/**
@@ -178,7 +180,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
178180
* (Scala-specific)
179181
* Implicit methods available in Scala for converting common Scala objects into [[DataFrame]]s.
180182
*/
181-
object implicits {
183+
object implicits extends Serializable {
182184
// scalastyle:on
183185

184186
/** Creates a DataFrame from an RDD of case classes or tuples. */

0 commit comments

Comments (0)