From b233d09977d3ca1afead55f8c8d6057c5643a500 Mon Sep 17 00:00:00 2001
From: hyukjinkwon
Date: Sat, 22 Oct 2016 20:09:04 +0200
Subject: [PATCH 1/2] Backport "Use type-widened encoder for DataFrame rather
 than existing encoder to allow type-widening from set operations"

---
 .../scala/org/apache/spark/sql/Dataset.scala      | 16 +++++++++++++---
 .../org/apache/spark/sql/DataFrameSuite.scala     | 16 ++++++++++++++++
 2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 0b236a0c7466..4946bbe634d9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -1456,7 +1456,7 @@ class Dataset[T] private[sql](
    * @group typedrel
    * @since 2.0.0
    */
-  def union(other: Dataset[T]): Dataset[T] = withTypedPlan {
+  def union(other: Dataset[T]): Dataset[T] = withSetOperator {
     // This breaks caching, but it's usually ok because it addresses a very specific use case:
     // using union to union many files or partitions.
     CombineUnions(Union(logicalPlan, other.logicalPlan))
@@ -1472,7 +1472,7 @@ class Dataset[T] private[sql](
    * @group typedrel
    * @since 1.6.0
    */
-  def intersect(other: Dataset[T]): Dataset[T] = withTypedPlan {
+  def intersect(other: Dataset[T]): Dataset[T] = withSetOperator {
     Intersect(logicalPlan, other.logicalPlan)
   }
 
@@ -1486,7 +1486,7 @@ class Dataset[T] private[sql](
    * @group typedrel
    * @since 2.0.0
    */
-  def except(other: Dataset[T]): Dataset[T] = withTypedPlan {
+  def except(other: Dataset[T]): Dataset[T] = withSetOperator {
     Except(logicalPlan, other.logicalPlan)
   }
 
@@ -2607,4 +2607,14 @@ class Dataset[T] private[sql](
   @inline private def withTypedPlan[U : Encoder](logicalPlan: => LogicalPlan): Dataset[U] = {
     Dataset(sparkSession, logicalPlan)
   }
+
+  /** A convenient function to wrap a set based logical plan and produce a Dataset. */
+  @inline private def withSetOperator[U : Encoder](logicalPlan: => LogicalPlan): Dataset[U] = {
+    if (classTag.runtimeClass.isAssignableFrom(classOf[Row])) {
+      // Set operators widen types (change the schema), so we cannot reuse the row encoder.
+      Dataset.ofRows(sparkSession, logicalPlan).asInstanceOf[Dataset[U]]
+    } else {
+      Dataset(sparkSession, logicalPlan)
+    }
+  }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 7ab0fe07b9c4..6da3c9c96abc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -19,6 +19,7 @@ package org.apache.spark.sql
 
 import java.io.File
 import java.nio.charset.StandardCharsets
+import java.sql.{Date, Timestamp}
 import java.util.UUID
 
 import scala.language.postfixOps
@@ -1585,4 +1586,19 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
       }
     }
   }
+
+  test("SPARK-17123: Performing set operations that combine non-scala native types") {
+    val dates = Seq(
+      (BigDecimal.valueOf(1), new Timestamp(2)),
+      (BigDecimal.valueOf(4), new Timestamp(5))
+    ).toDF("timestamp", "decimal")
+
+    val widenTypedRows = Seq(
+      (10.5D, "string")
+    ).toDF("timestamp", "decimal")
+
+    dates.union(widenTypedRows).collect()
+    dates.except(widenTypedRows).collect()
+    dates.intersect(widenTypedRows).collect()
+  }
 }

From 9154463561c20b26c92e145e4b3039239fb46f5e Mon Sep 17 00:00:00 2001
From: hyukjinkwon
Date: Sun, 23 Oct 2016 19:07:40 +0900
Subject: [PATCH 2/2] Fix field names in the test

---
 .../src/test/scala/org/apache/spark/sql/DataFrameSuite.scala | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 6da3c9c96abc..f8d7ddde85d9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -1591,11 +1591,11 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
     val dates = Seq(
       (BigDecimal.valueOf(1), new Timestamp(2)),
       (BigDecimal.valueOf(4), new Timestamp(5))
-    ).toDF("timestamp", "decimal")
+    ).toDF("decimal", "timestamp")
 
     val widenTypedRows = Seq(
       (10.5D, "string")
-    ).toDF("timestamp", "decimal")
+    ).toDF("decimal", "timestamp")
 
     dates.union(widenTypedRows).collect()
     dates.except(widenTypedRows).collect()
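
Note for reviewers: below is a minimal, self-contained sketch of the scenario this
backport addresses, mirroring the new DataFrameSuite test (with the field names as
corrected in PATCH 2/2). The SparkSession setup, app name, and value names are
illustrative assumptions for a local run, not part of the patch itself.

    import java.sql.Timestamp
    import org.apache.spark.sql.SparkSession

    // Illustrative local session; any Spark 2.0.x session with implicits imported works.
    val spark = SparkSession.builder().master("local").appName("SPARK-17123-repro").getOrCreate()
    import spark.implicits._

    // Left side: decimal and timestamp columns (same data as the new test).
    val dates = Seq(
      (BigDecimal.valueOf(1), new Timestamp(2)),
      (BigDecimal.valueOf(4), new Timestamp(5))
    ).toDF("decimal", "timestamp")

    // Right side: double and string columns, so the set operations must widen
    // the result schema (decimal -> double, timestamp -> string).
    val widenTypedRows = Seq((10.5D, "string")).toDF("decimal", "timestamp")

    // Without this backport these collect() calls fail at runtime (SPARK-17123):
    // union, intersect and except went through withTypedPlan, which reused the
    // left side's row encoder even though the plan's schema had been widened.
    // With withSetOperator, a DataFrame re-derives its encoder from the widened
    // schema via Dataset.ofRows.
    dates.union(widenTypedRows).collect()
    dates.except(widenTypedRows).collect()
    dates.intersect(widenTypedRows).collect()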