diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index deb1008c468b..14f6f658d9b7 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -43,7 +43,7 @@ import org.apache.spark.util.Utils
private[hive] object SparkSQLCLIDriver {
private var prompt = "spark-sql"
private var continuedPrompt = "".padTo(prompt.length, ' ')
- private var transport:TSocket = _
+ private var transport: TSocket = _
installSignalHandler()
@@ -276,13 +276,13 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
driver.init()
val out = sessionState.out
- val start:Long = System.currentTimeMillis()
+ val start: Long = System.currentTimeMillis()
if (sessionState.getIsVerbose) {
out.println(cmd)
}
val rc = driver.run(cmd)
val end = System.currentTimeMillis()
- val timeTaken:Double = (end - start) / 1000.0
+ val timeTaken: Double = (end - start) / 1000.0
ret = rc.getResponseCode
if (ret != 0) {
@@ -310,7 +310,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
res.clear()
}
} catch {
- case e:IOException =>
+ case e: IOException =>
console.printError(
s"""Failed with exception ${e.getClass.getName}: ${e.getMessage}
|${org.apache.hadoop.util.StringUtils.stringifyException(e)}
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala
index 7c48ff4b35df..10c83d8b27a2 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala
@@ -77,7 +77,7 @@ private[ui] class ThriftServerPage(parent: ThriftServerTab) extends WebUIPage(""
[{id}]
}
- val detail = if(info.state == ExecutionState.FAILED) info.detail else info.executePlan
+ val detail = if (info.state == ExecutionState.FAILED) info.detail else info.executePlan
<td>{info.userName}</td>
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
index e1466e042303..4c9fab7ef613 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
@@ -73,7 +73,7 @@ class UISeleniumSuite
}
ignore("thrift server ui test") {
- withJdbcStatement(statement =>{
+ withJdbcStatement { statement =>
val baseURL = s"http://localhost:$uiPort"
val queries = Seq(
@@ -97,6 +97,6 @@ class UISeleniumSuite
findAll(cssSelector("""ul table tbody tr td""")).map(_.text).toList should contain (line)
}
}
- })
+ }
}
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala
index 3f20c6142e59..7f8449cdc282 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala
@@ -29,10 +29,10 @@ import org.apache.spark.sql.hive.execution.{AddJar, AddFile, HiveNativeCommand}
private[hive] class ExtendedHiveQlParser extends AbstractSparkSQLParser {
// Keyword is a convention with AbstractSparkSQLParser, which will scan all of the `Keyword`
// properties via reflection the class in runtime for constructing the SqlLexical object
- protected val ADD = Keyword("ADD")
- protected val DFS = Keyword("DFS")
+ protected val ADD = Keyword("ADD")
+ protected val DFS = Keyword("DFS")
protected val FILE = Keyword("FILE")
- protected val JAR = Keyword("JAR")
+ protected val JAR = Keyword("JAR")
protected lazy val start: Parser[LogicalPlan] = dfs | addJar | addFile | hiveQl
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 9ab98fdcce72..bfaa25232d0a 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -527,7 +527,7 @@ private[hive] object HiveContext {
val propMap: HashMap[String, String] = HashMap()
// We have to mask all properties in hive-site.xml that relates to metastore data source
// as we used a local metastore here.
- HiveConf.ConfVars.values().foreach { confvar =>
+ HiveConf.ConfVars.values().foreach { confvar =>
if (confvar.varname.contains("datanucleus") || confvar.varname.contains("jdo")) {
propMap.put(confvar.varname, confvar.defaultVal)
}
@@ -550,7 +550,7 @@ private[hive] object HiveContext {
}.mkString("{", ",", "}")
case (seq: Seq[_], ArrayType(typ, _)) =>
seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]")
- case (map: Map[_,_], MapType(kType, vType, _)) =>
+ case (map: Map[_, _], MapType(kType, vType, _)) =>
map.map {
case (key, value) =>
toHiveStructString((key, kType)) + ":" + toHiveStructString((value, vType))
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
index 0a694c70e4e5..24cd33508263 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
@@ -335,7 +335,7 @@ private[hive] trait HiveInspectors {
val allRefs = si.getAllStructFieldRefs
new GenericRow(
allRefs.map(r =>
- unwrap(si.getStructFieldData(data,r), r.getFieldObjectInspector)).toArray)
+ unwrap(si.getStructFieldData(data, r), r.getFieldObjectInspector)).toArray)
}
@@ -561,8 +561,8 @@ private[hive] trait HiveInspectors {
case DecimalType() => PrimitiveObjectInspectorFactory.javaHiveDecimalObjectInspector
case StructType(fields) =>
ObjectInspectorFactory.getStandardStructObjectInspector(
- java.util.Arrays.asList(fields.map(f => f.name) :_*),
- java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)) :_*))
+ java.util.Arrays.asList(fields.map(f => f.name) : _*),
+ java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)) : _*))
}
/**
@@ -677,8 +677,8 @@ private[hive] trait HiveInspectors {
getListTypeInfo(elemType.toTypeInfo)
case StructType(fields) =>
getStructTypeInfo(
- java.util.Arrays.asList(fields.map(_.name) :_*),
- java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo) :_*))
+ java.util.Arrays.asList(fields.map(_.name) : _*),
+ java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo) : _*))
case MapType(keyType, valueType, _) =>
getMapTypeInfo(keyType.toTypeInfo, valueType.toTypeInfo)
case BinaryType => binaryTypeInfo
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 425a4005aa2c..07e9d84369f3 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -546,13 +546,17 @@ private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: Hive
* UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
* For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
*/
- override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = ???
+ override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = {
+ throw new UnsupportedOperationException
+ }
/**
* UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
* For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
*/
- override def unregisterTable(tableIdentifier: Seq[String]): Unit = ???
+ override def unregisterTable(tableIdentifier: Seq[String]): Unit = {
+ throw new UnsupportedOperationException
+ }
override def unregisterAllTables(): Unit = {}
}
@@ -725,7 +729,7 @@ private[hive] case class MetastoreRelation
val output = attributes ++ partitionKeys
/** An attribute map that can be used to lookup original attributes based on expression id. */
- val attributeMap = AttributeMap(output.map(o => (o,o)))
+ val attributeMap = AttributeMap(output.map(o => (o, o)))
/** An attribute map for determining the ordinal for non-partition columns. */
val columnOrdinals = AttributeMap(attributes.zipWithIndex)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index 2cbb5ca4d2e0..3915ee835685 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -665,7 +665,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
HiveColumn(field.getName, field.getType, field.getComment)
})
}
- case Token("TOK_TABLEROWFORMAT", Token("TOK_SERDEPROPS", child :: Nil) :: Nil)=>
+ case Token("TOK_TABLEROWFORMAT", Token("TOK_SERDEPROPS", child :: Nil) :: Nil) =>
val serdeParams = new java.util.HashMap[String, String]()
child match {
case Token("TOK_TABLEROWFORMATFIELD", rowChild1 :: rowChild2) =>
@@ -775,7 +775,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
// Support "TRUNCATE TABLE table_name [PARTITION partition_spec]"
case Token("TOK_TRUNCATETABLE",
- Token("TOK_TABLE_PARTITION",table)::Nil) => NativePlaceholder
+ Token("TOK_TABLE_PARTITION", table) :: Nil) => NativePlaceholder
case Token("TOK_QUERY", queryArgs)
if Seq("TOK_FROM", "TOK_INSERT").contains(queryArgs.head.getText) =>
@@ -1151,7 +1151,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
case Seq(false, false) => Inner
}.toBuffer
- val joinedTables = tables.reduceLeft(Join(_,_, Inner, None))
+ val joinedTables = tables.reduceLeft(Join(_, _, Inner, None))
// Must be transform down.
val joinedResult = joinedTables transform {
@@ -1171,7 +1171,8 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
// worth the number of hacks that will be required to implement it. Namely, we need to add
// some sort of mapped star expansion that would expand all child output row to be similarly
// named output expressions where some aggregate expression has been applied (i.e. First).
- ??? // Aggregate(groups, Star(None, First(_)) :: Nil, joinedResult)
+ // Aggregate(groups, Star(None, First(_)) :: Nil, joinedResult)
+ throw new UnsupportedOperationException
case Token(allJoinTokens(joinToken),
relation1 ::
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
index 7a6ca48b54a2..8613332186f2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
@@ -194,10 +194,9 @@ case class InsertIntoHiveTable(
if (partition.nonEmpty) {
// loadPartition call orders directories created on the iteration order of the this map
- val orderedPartitionSpec = new util.LinkedHashMap[String,String]()
- table.hiveQlTable.getPartCols().foreach{
- entry=>
- orderedPartitionSpec.put(entry.getName,partitionSpec.get(entry.getName).getOrElse(""))
+ val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
+ table.hiveQlTable.getPartCols().foreach { entry =>
+ orderedPartitionSpec.put(entry.getName, partitionSpec.get(entry.getName).getOrElse(""))
}
val partVals = MetaStoreUtils.getPvals(table.hiveQlTable.getPartCols, partitionSpec)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
index bfd26e0170c7..6f27a8626fc1 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
@@ -216,7 +216,7 @@ case class HiveScriptIOSchema (
val columnTypes = attrs.map {
case aref: AttributeReference => aref.dataType
case e: NamedExpression => e.dataType
- case _ => null
+ case _ => null
}
(columns, columnTypes)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 7ec4f7332502..bb116e3ab7de 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -315,7 +315,7 @@ private[hive] case class HiveWindowFunction(
// The object inspector of values returned from the Hive window function.
@transient
- protected lazy val returnInspector = {
+ protected lazy val returnInspector = {
evaluator.init(GenericUDAFEvaluator.Mode.COMPLETE, inputInspectors)
}
@@ -410,7 +410,7 @@ private[hive] case class HiveGenericUdaf(
protected lazy val resolver: AbstractGenericUDAFResolver = funcWrapper.createFunction()
@transient
- protected lazy val objectInspector = {
+ protected lazy val objectInspector = {
val parameterInfo = new SimpleGenericUDAFParameterInfo(inspectors.toArray, false, false)
resolver.getEvaluator(parameterInfo)
.init(GenericUDAFEvaluator.Mode.COMPLETE, inspectors.toArray)
@@ -443,7 +443,7 @@ private[hive] case class HiveUdaf(
new GenericUDAFBridge(funcWrapper.createFunction())
@transient
- protected lazy val objectInspector = {
+ protected lazy val objectInspector = {
val parameterInfo = new SimpleGenericUDAFParameterInfo(inspectors.toArray, false, false)
resolver.getEvaluator(parameterInfo)
.init(GenericUDAFEvaluator.Mode.COMPLETE, inspectors.toArray)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
index 50b209f7ccbb..2bb526b14be3 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
@@ -71,7 +71,7 @@ private[hive] class SparkHiveWriterContainer(
@transient protected lazy val jobContext = newJobContext(conf.value, jID.value)
@transient private lazy val taskContext = newTaskAttemptContext(conf.value, taID.value)
@transient private lazy val outputFormat =
- conf.value.getOutputFormat.asInstanceOf[HiveOutputFormat[AnyRef,Writable]]
+ conf.value.getOutputFormat.asInstanceOf[HiveOutputFormat[AnyRef, Writable]]
def driverSideSetup() {
setIDs(0, 0, 0)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 2e06cabfa80c..7c7afc824d7a 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -189,7 +189,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
}
}
- case class TestTable(name: String, commands: (()=>Unit)*)
+ case class TestTable(name: String, commands: (() => Unit)*)
protected[hive] implicit class SqlCmd(sql: String) {
def cmd: () => Unit = {
@@ -253,8 +253,8 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
| 'serialization.format'='${classOf[TBinaryProtocol].getName}'
|)
|STORED AS
- |INPUTFORMAT '${classOf[SequenceFileInputFormat[_,_]].getName}'
- |OUTPUTFORMAT '${classOf[SequenceFileOutputFormat[_,_]].getName}'
+ |INPUTFORMAT '${classOf[SequenceFileInputFormat[_, _]].getName}'
+ |OUTPUTFORMAT '${classOf[SequenceFileOutputFormat[_, _]].getName}'
""".stripMargin)
runSqlHive(
|