Skip to content

Commit af32f4a

Browse files
yhuairxin
authored and committed
[SPARK-15013][SQL] Remove hiveConf from HiveSessionState
## What changes were proposed in this pull request? The hiveConf in HiveSessionState is not actually used anymore. Let's remove it. ## How was this patch tested? Existing tests Author: Yin Huai <[email protected]> Closes #12786 from yhuai/removeHiveConf.
1 parent a04b1de commit af32f4a

File tree

2 files changed

+1
-30
lines changed

2 files changed

+1
-30
lines changed

sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala

Lines changed: 0 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -45,31 +45,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
4545
*/
4646
lazy val metadataHive: HiveClient = sharedState.metadataHive.newSession()
4747

48-
/**
49-
* SQLConf and HiveConf contracts:
50-
*
51-
* 1. create a new o.a.h.hive.ql.session.SessionState for each HiveContext
52-
* 2. when the Hive session is first initialized, params in HiveConf will get picked up by the
53-
* SQLConf. Additionally, any properties set by set() or a SET command inside sql() will be
54-
* set in the SQLConf *as well as* in the HiveConf.
55-
*/
56-
lazy val hiveconf: HiveConf = {
57-
val initialConf = new HiveConf(
58-
sparkSession.sparkContext.hadoopConfiguration,
59-
classOf[org.apache.hadoop.hive.ql.session.SessionState])
60-
61-
// HiveConf is a Hadoop Configuration, which has a field of classLoader and
62-
// the initial value will be the current thread's context class loader
63-
// (i.e. initClassLoader at here).
64-
// We call initialConf.setClassLoader(initClassLoader) at here to make
65-
// this action explicit.
66-
initialConf.setClassLoader(sparkSession.sharedState.jarClassLoader)
67-
sparkSession.sparkContext.conf.getAll.foreach { case (k, v) =>
68-
initialConf.set(k, v)
69-
}
70-
initialConf
71-
}
72-
7348
setDefaultOverrideConfs()
7449

7550
/**
@@ -145,7 +120,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
145120
override def setConf(key: String, value: String): Unit = {
146121
super.setConf(key, value)
147122
metadataHive.runSqlHive(s"SET $key=$value")
148-
hiveconf.set(key, value)
149123
}
150124

151125
override def addJar(path: String): Unit = {

sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -153,9 +153,6 @@ private[hive] class TestHiveSparkSession(
153153
// By clearing the port we force Spark to pick a new one. This allows us to rerun tests
154154
// without restarting the JVM.
155155
System.clearProperty("spark.hostPort")
156-
CommandProcessorFactory.clean(sessionState.hiveconf)
157-
158-
sessionState.hiveconf.set("hive.plan.serialization.format", "javaXML")
159156

160157
// For some hive test case which contain ${system:test.tmp.dir}
161158
System.setProperty("test.tmp.dir", Utils.createTempDir().getCanonicalPath)
@@ -423,7 +420,7 @@ private[hive] class TestHiveSparkSession(
423420
foreach { udfName => FunctionRegistry.unregisterTemporaryUDF(udfName) }
424421

425422
// Some tests corrupt this value on purpose, which breaks the RESET call below.
426-
sessionState.hiveconf.set("fs.default.name", new File(".").toURI.toString)
423+
sessionState.conf.setConfString("fs.default.name", new File(".").toURI.toString)
427424
// It is important that we RESET first as broken hooks that might have been set could break
428425
// other sql exec here.
429426
sessionState.metadataHive.runSqlHive("RESET")

0 commit comments

Comments
 (0)