Skip to content

Commit 8cd9cdf

Browse files
coreywoodfield authored and gatorsmile committed
[SPARK-21333][DOCS] Removed invalid joinTypes from javadoc of Dataset#joinWith
## What changes were proposed in this pull request? Two invalid join types were mistakenly listed in the javadoc for joinWith, in the Dataset class. I presume these were copied from the javadoc of join, but since joinWith returns a Dataset\<Tuple2\>, left_semi and left_anti are invalid, as they only return values from one of the datasets, instead of from both ## How was this patch tested? I ran the following code : ``` public static void main(String[] args) { SparkSession spark = new SparkSession(new SparkContext("local[*]", "Test")); Dataset<Row> one = spark.createDataFrame(Arrays.asList(new Bean(1), new Bean(2), new Bean(3), new Bean(4), new Bean(5)), Bean.class); Dataset<Row> two = spark.createDataFrame(Arrays.asList(new Bean(4), new Bean(5), new Bean(6), new Bean(7), new Bean(8), new Bean(9)), Bean.class); try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "inner").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "cross").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "outer").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "full").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "full_outer").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "left").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "left_outer").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "right").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "right_outer").show();} catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "left_semi").show();} 
catch (Exception e) {e.printStackTrace();} try {two.joinWith(one, one.col("x").equalTo(two.col("x")), "left_anti").show();} catch (Exception e) {e.printStackTrace();} } ``` which tests all the different join types, and the last two (left_semi and left_anti) threw exceptions. The same code using join instead of joinWith did fine. The Bean class was just a java bean with a single int field, x. Author: Corey Woodfield <[email protected]> Closes #18462 from coreywoodfield/master.
1 parent c42ef95 commit 8cd9cdf

File tree

2 files changed

+21
-1
lines changed

2 files changed

+21
-1
lines changed

sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -980,7 +980,7 @@ class Dataset[T] private[sql](
    * @param condition Join expression.
    * @param joinType Type of join to perform. Default `inner`. Must be one of:
    *                 `inner`, `cross`, `outer`, `full`, `full_outer`, `left`, `left_outer`,
-   *                 `right`, `right_outer`, `left_semi`, `left_anti`.
+   *                 `right`, `right_outer`.
    *
    * @group typedrel
    * @since 1.6.0
@@ -997,6 +997,10 @@ class Dataset[T] private[sql](
         JoinType(joinType),
         Some(condition.expr))).analyzed.asInstanceOf[Join]

+    if (joined.joinType == LeftSemi || joined.joinType == LeftAnti) {
+      throw new AnalysisException("Invalid join type in joinWith: " + joined.joinType.sql)
+    }
+
     // For both join side, combine all outputs into a single column and alias it with "_1" or "_2",
     // to match the schema for the encoder of the join result.
     // Note that we do this before joining them, to enable the join operator to return null for one

sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import java.io.{Externalizable, ObjectInput, ObjectOutput}
 import java.sql.{Date, Timestamp}

 import org.apache.spark.sql.catalyst.encoders.{OuterScopes, RowEncoder}
+import org.apache.spark.sql.catalyst.plans.{LeftAnti, LeftSemi}
 import org.apache.spark.sql.catalyst.util.sideBySide
 import org.apache.spark.sql.execution.{LogicalRDD, RDDScanExec}
 import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ShuffleExchange}
@@ -400,6 +401,21 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
       ((("b", 2), ("b", 2)), ("b", 2)))
   }

+  test("joinWith join types") {
+    val ds1 = Seq(1, 2, 3).toDS().as("a")
+    val ds2 = Seq(1, 2).toDS().as("b")
+
+    val e1 = intercept[AnalysisException] {
+      ds1.joinWith(ds2, $"a.value" === $"b.value", "left_semi")
+    }.getMessage
+    assert(e1.contains("Invalid join type in joinWith: " + LeftSemi.sql))
+
+    val e2 = intercept[AnalysisException] {
+      ds1.joinWith(ds2, $"a.value" === $"b.value", "left_anti")
+    }.getMessage
+    assert(e2.contains("Invalid join type in joinWith: " + LeftAnti.sql))
+  }
+
   test("groupBy function, keys") {
     val ds = Seq(("a", 1), ("b", 1)).toDS()
     val grouped = ds.groupByKey(v => (1, v._2))

0 commit comments

Comments (0)