Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
Original file line number Diff line number Diff line change
Expand Up @@ -1256,8 +1256,12 @@ class DataFrame private[sql](
@scala.annotation.varargs
def drop(colNames: String*): DataFrame = {
val resolver = sqlContext.analyzer.resolver

val remainingCols =
schema.filter(f => colNames.forall(n => !resolver(f.name, n))).map(f => Column(f.name))
schema.filter(f => colNames.forall(n => !resolver(f.name, n)))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Super nit: I would consider also breaking before the `filter` if you have to break for the `map`; otherwise the `filter` looks special, when really you are just chaining a series of operations.

.map(f => Column(UnresolvedAttribute.quoted(f.name)))


if (remainingCols.size == this.schema.size) {
this
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ import org.apache.spark.sql.types._
class DataFrameSuite extends QueryTest with SharedSQLContext {
import testImplicits._


test("analysis error should be eagerly reported") {
// Eager analysis.
withSQLConf(SQLConf.DATAFRAME_EAGER_ANALYSIS.key -> "true") {
Expand Down Expand Up @@ -1291,4 +1292,11 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
Seq(1 -> "a").toDF("i", "j").filter($"i".cast(StringType) === "1"),
Row(1, "a"))
}

// SPARK-12987: dropping one column must leave a sibling column whose name
// contains a dot ("a.c") intact in both the result rows and the schema,
// i.e. the remaining name must not be re-parsed as a nested attribute.
test("SPARK-12987: drop column ") {
  val original = Seq((1, 2)).toDF("a_b", "a.c")
  val afterDrop = original.drop("a_b")
  // Only the value of "a.c" should remain in the data...
  checkAnswer(afterDrop, Row(2))
  // ...and the schema should still report the dotted column name verbatim.
  assert(afterDrop.schema.map(_.name) === Seq("a.c"))
}
}