Skip to content

Commit f803f41

Browse files
committed
Use SQLConf.CASE_SENSITIVE.key
1 parent e0a467c commit f803f41

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala

Lines changed: 3 additions & 2 deletions
(columns below: original file line number | diff line number | diff line change)
@@ -27,6 +27,7 @@ import org.scalatest.BeforeAndAfter
 import org.apache.spark.SparkException
 import org.apache.spark.sql.{Row, SaveMode}
 import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 import org.apache.spark.util.Utils
@@ -175,14 +176,14 @@ class JDBCWriteSuite extends SharedSQLContext with BeforeAndAfter {

     df.write.jdbc(url, "TEST.APPENDTEST", new Properties())

-    withSQLConf("spark.sql.caseSensitive" -> "true") {
+    withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
       val m = intercept[SparkException] {
         df2.write.mode(SaveMode.Append).jdbc(url, "TEST.APPENDTEST", new Properties())
       }.getMessage
       assert(m.contains("Column \"NAME\" not found"))
     }

-    withSQLConf("spark.sql.caseSensitive" -> "false") {
+    withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
       df2.write.mode(SaveMode.Append).jdbc(url, "TEST.APPENDTEST", new Properties())
       assert(3 === spark.read.jdbc(url, "TEST.APPENDTEST", new Properties()).count())
       assert(2 === spark.read.jdbc(url, "TEST.APPENDTEST", new Properties()).collect()(0).length)

0 commit comments

Comments
 (0)