Commit 2ace09f

Commit message: address comments.
1 parent: f9a8bdf

3 files changed: +4 additions, -5 deletions


sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala

Lines changed: 1 addition & 2 deletions
@@ -21,7 +21,6 @@ import java.util.Properties
 
 import scala.collection.JavaConverters._
 
-import org.apache.spark.Logging
 import org.apache.spark.annotation.Experimental
 import org.apache.spark.sql.catalyst.{CatalystQl, TableIdentifier}
 import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
@@ -38,7 +37,7 @@ import org.apache.spark.sql.sources.HadoopFsRelation
  * @since 1.4.0
  */
 @Experimental
-final class DataFrameWriter private[sql](df: DataFrame) extends Logging {
+final class DataFrameWriter private[sql](df: DataFrame) {
 
   /**
    * Specifies the behavior when data or table already exists. Options include:
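For context, DataFrameWriter is the builder returned by DataFrame.write; dropping the Logging mixin removes unused internal log* helpers without touching the public fluent API. A minimal usage sketch of that API (the format, mode, and table name below are illustrative assumptions, not taken from this commit):

// Typical DataFrameWriter call chain: the builder collects options,
// and saveAsTable triggers the actual write.
df.write
  .format("parquet")   // data source to write with (assumed here)
  .mode("overwrite")   // behavior when the table already exists
  .saveAsTable("t")    // persist df as a metastore table (name assumed)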

sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -778,7 +778,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
 
   test("Saving information for sortBy and bucketBy columns") {
     val df = (1 to 10).map(i => (i, i + 1, s"str$i", s"str${i + 1}")).toDF("a", "b", "c", "d")
-    val tableName = s"partitionInfo_${System.currentTimeMillis()}"
+    val tableName = s"bucketingInfo_${System.currentTimeMillis()}"
 
     withTable(tableName) {
       df.write
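The rename reflects what the test actually verifies: it writes a bucketed, sorted table and checks that the bucketing information survives in the metastore, so "bucketingInfo" fits better than "partitionInfo". A sketch of the kind of write such a test exercises (the bucket count and column choices are assumptions, not copied from the suite):

// Write df bucketed and sorted within buckets, then register it as a table;
// the bucketing spec is recorded in the Hive metastore alongside the data.
df.write
  .format("parquet")
  .bucketBy(8, "b", "c")  // hash rows by (b, c) into 8 buckets (assumed spec)
  .sortBy("c")            // sort rows within each bucket by c
  .saveAsTable(tableName)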

sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala

Lines changed: 2 additions & 2 deletions
@@ -163,8 +163,8 @@ class BucketedWriteSuite extends QueryTest with SQLTestUtils with TestHiveSingle
 
   test("write bucketed data with the overlapping bucketBy and partitionBy columns") {
     intercept[AnalysisException](df.write
-      .partitionBy("i")
-      .bucketBy(8, "i", "k")
+      .partitionBy("i", "j")
+      .bucketBy(8, "j", "k")
       .sortBy("k")
       .saveAsTable("bucketed_table"))
   }
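The new column lists keep the test's intent while making the overlap explicit: "j" now appears in both partitionBy and bucketBy, which DataFrameWriter rejects with an AnalysisException, since a column cannot serve as both a partition key and a bucket key. For contrast, a disjoint spec like the following should pass analysis (a sketch assuming the same df with columns i, j, k):

// Partition and bucket columns are disjoint, so the write is accepted.
df.write
  .partitionBy("i")       // partition only on i
  .bucketBy(8, "j", "k")  // bucket on j and k; no overlap with partitionBy
  .sortBy("k")
  .saveAsTable("bucketed_table_ok")  // table name assumed for illustration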
