SparkSQLCLIDriver.scala
@@ -20,6 +20,8 @@ package org.apache.spark.sql.hive.thriftserver
 import java.io._
 import java.util.{ArrayList => JArrayList, Locale}
 
+import org.apache.spark.sql.AnalysisException
+
 import scala.collection.JavaConverters._
 
 import jline.console.ConsoleReader
@@ -298,6 +300,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {

       driver.init()
       val out = sessionState.out
+      val err = sessionState.err
       val start: Long = System.currentTimeMillis()
       if (sessionState.getIsVerbose) {
         out.println(cmd)
@@ -308,7 +311,12 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {

       ret = rc.getResponseCode
       if (ret != 0) {
-        console.printError(rc.getErrorMessage())
+        // For analysis exceptions, only the error message is printed to the console.
+        rc.getException() match {
+          case e: AnalysisException =>
+            err.println(s"""Error in query: ${e.getMessage}""")
+          case _ => err.println(rc.getErrorMessage())
+        }
         driver.close()
         return ret
       }
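The dispatch above works because the SparkSQLDriver change below attaches the originating exception to the CommandProcessorResponse, so the CLI can decide how verbose to be per exception type. A minimal standalone sketch of the same shape, using toy stand-in types rather than the real Spark and Hive classes:

object ErrorDispatchSketch extends App {
  // Toy stand-ins for org.apache.spark.sql.AnalysisException and Hive's
  // CommandProcessorResponse, just to show the dispatch shape.
  class AnalysisException(message: String) extends Exception(message)
  case class Response(code: Int, errorMessage: String, exception: Throwable)

  def report(rc: Response, err: java.io.PrintStream): Unit = rc.exception match {
    // Analysis errors are user mistakes: print only the message, no stack trace.
    case e: AnalysisException => err.println(s"Error in query: ${e.getMessage}")
    // Anything else is unexpected: fall back to the full error text.
    case _ => err.println(rc.errorMessage)
  }

  // A missing table now produces a single error line instead of a stack trace.
  report(Response(1, "<stack trace>", new AnalysisException("Table not found: t")), System.err)
}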
SparkSQLDriver.scala
@@ -18,6 +18,8 @@
 package org.apache.spark.sql.hive.thriftserver
 
 import java.util.{Arrays, ArrayList => JArrayList, List => JList}
+import org.apache.log4j.LogManager
+import org.apache.spark.sql.AnalysisException
 
 import scala.collection.JavaConverters._

@@ -63,9 +65,12 @@ private[hive] class SparkSQLDriver(
       tableSchema = getResultSetSchema(execution)
       new CommandProcessorResponse(0)
     } catch {
-      case cause: Throwable =>
-        logError(s"Failed in [$command]", cause)
-        new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(cause), null)
+      case ae: AnalysisException =>
+        logDebug(s"Failed in [$command]", ae)
+        new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(ae), null, ae)
+      case cause: Throwable =>
+        logError(s"Failed in [$command]", cause)
+        new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(cause), null, cause)
     }
   }

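Two details in this catch block are easy to miss: the AnalysisException case must come before the catch-all Throwable case, since Scala tries patterns top to bottom and the reverse order would never reach it; and the exception itself is now passed as the last argument to CommandProcessorResponse, which is what the CLI driver above pattern-matches on. A small illustration of the ordering point, with a toy exception type:

object CatchOrderSketch extends App {
  class AnalysisException(m: String) extends Exception(m)

  // Evaluates `work`, classifying failures the way SparkSQLDriver.run now does:
  // analysis errors quietly, everything else loudly.
  def classify(work: => Unit): String =
    try { work; "ok" } catch {
      case _: AnalysisException => "analysis error (logDebug, no console noise)"
      case _: Throwable         => "unexpected error (logError with stack trace)"
    }

  println(classify(throw new AnalysisException("no such table"))) // analysis error
  println(classify(throw new RuntimeException("boom")))           // unexpected error
}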
CliSuite.scala
@@ -58,7 +58,7 @@ class CliSuite extends SparkFunSuite with BeforeAndAfter with Logging {
    * @param timeout maximum time for the commands to complete
    * @param extraArgs any extra arguments
    * @param errorResponses a sequence of strings whose presence in the stdout of the forked process
-   *                       is taken as an immediate error condition. That is: if a line beginning
-   *                       with one of these strings is found, fail the test immediately.
+   *                       is taken as an immediate error condition. That is: if a line containing
+   *                       one of these strings is found, fail the test immediately.
    *                       The default value is `Seq("Error:")`
    *
@@ -104,7 +104,7 @@ class CliSuite extends SparkFunSuite with BeforeAndAfter with Logging {
         }
       } else {
         errorResponses.foreach { r =>
-          if (line.startsWith(r)) {
+          if (line.contains(r)) {
             foundAllExpectedAnswers.tryFailure(
               new RuntimeException(s"Failed with error line '$line'"))
           }
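The startsWith-to-contains switch matters because the forked CLI's stdout lines carry a log prefix (timestamp, level, logger name), as the output quoted in the review thread below shows, so a marker such as "AnalysisException" rarely sits at the start of a line. A tiny illustration with a made-up sample line:

object ContainsVsStartsWith extends App {
  // Sample line modeled on the CLI output quoted below; not real output.
  val line = "15/10/26 10:34:27 ERROR SparkSQLDriver: org.apache.spark.sql.AnalysisException: no such table test"

  println(line.startsWith("AnalysisException")) // false: hidden behind the log prefix
  println(line.contains("AnalysisException"))   // true: matched anywhere in the line
}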
@@ -219,4 +219,12 @@
-> "OK"
)
}

test("SPARK-11188 Analysis error reporting") {
runCliWithin(timeout = 2.minute,
errorResponses = Seq("AnalysisException"))(
"select * from nonexistent_table;"
-> "Error in query: Table not found: nonexistent_table;"
marmbrus (Contributor) commented on this hunk:
This is only testing that your message is there, not that the stack traces are elided. There is still a giant stack trace upon failure:

spark-sql> SELECT * FROM test;
15/10/26 10:34:27 INFO ParseDriver: Parsing command: SELECT * FROM test
15/10/26 10:34:27 INFO ParseDriver: Parse Completed
15/10/26 10:34:27 INFO HiveMetaStore: 0: get_table : db=default tbl=test
15/10/26 10:34:27 INFO audit: ugi=marmbrus  ip=unknown-ip-addr  cmd=get_table : db=default tbl=test 
15/10/26 10:34:27 ERROR SparkSQLDriver: Failed in [SELECT * FROM test]
org.apache.spark.sql.AnalysisException: no such table test; line 1 pos 14
    at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:260)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:268)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$7.applyOrElse(Analyzer.scala:264)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
    at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:51)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:56)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:249)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:279)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:264)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:254)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:83)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:80)
    at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:111)
    at scala.collection.immutable.List.foldLeft(List.scala:84)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:80)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:72)
    at scala.collection.immutable.List.foreach(List.scala:318)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:72)
    at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:41)
    at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:41)
    at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:39)
    at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:132)
    at org.apache.spark.sql.DataFrame$.apply(DataFrame.scala:51)
    at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:783)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:61)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:308)
    at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:226)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:680)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:180)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:205)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:120)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Error in query: no such table test; line 1 pos 14

The PR author replied:
@marmbrus
Thanks for your quick feedback. Actually, this is one of the questions I was asking earlier:

  1. The actual exception is also logged in the run method of SparkSQLDriver. If the
     logging is set to CONSOLE, then the exception is printed on the screen. I hope this is OK?

I think by default the logging goes to the console or stdout. If I change log4j.properties and direct the logging to a file, then we should not see a stack trace.

Michael, do you want this logging completely suppressed for AnalysisException?

The PR author followed up:
@marmbrus
Hi Michael, given that logging to the console is the default, I have made changes to suppress console logging for analysis exceptions. One option is to stop logging these kinds of exceptions entirely, but instead I have tried to log them to non-console appenders. Please let me know what you think.
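For reference, a programmatic sketch of routing that logger away from the console with the log4j 1.x API; the appender, pattern, and file path here are illustrative assumptions, not the configuration the PR actually uses:

import org.apache.log4j.{FileAppender, Level, LogManager, PatternLayout}

object QuietDriverLogging extends App {
  // Hypothetical setup: send SparkSQLDriver's log output to a file appender and
  // stop it propagating to the root logger's console appender.
  val logger = LogManager.getLogger("org.apache.spark.sql.hive.thriftserver.SparkSQLDriver")
  logger.setLevel(Level.DEBUG)
  logger.setAdditivity(false) // skip the root logger, i.e. no console output
  logger.addAppender(new FileAppender(
    new PatternLayout("%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n"),
    "/tmp/spark-sql-cli.log")) // illustrative path
}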

+    )
+  }
 }
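With the patch applied, the scenario from the review thread should collapse to a single error line on stderr. Based on the strings the new test asserts, the interaction would look roughly like:

spark-sql> select * from nonexistent_table;
Error in query: Table not found: nonexistent_table;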