10 changes: 7 additions & 3 deletions core/src/main/scala/org/apache/spark/Logging.scala
@@ -39,13 +39,17 @@ trait Logging {
// be serialized and used on another machine
@transient private var log_ : Logger = null

+ // Method to get the logger name for this object
+ protected def logName = {
+   // Ignore trailing $'s in the class names for Scala objects
+   this.getClass.getName.stripSuffix("$")
+ }

// Method to get or create the logger for this object
protected def log: Logger = {
if (log_ == null) {
initializeIfNecessary()
- var className = this.getClass.getName
- // Ignore trailing $'s in the class names for Scala objects
- log_ = LoggerFactory.getLogger(className.stripSuffix("$"))
+ log_ = LoggerFactory.getLogger(logName)
}
log_
}
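For context, here is a minimal, self-contained sketch of the pattern this hunk introduces (not the actual Spark trait; `SimpleLogging`, `MyJob`, and `logInfo` below are illustrative stand-ins): the logger is created lazily, and its name comes from an overridable `logName` method so Scala singleton objects drop the synthetic trailing `$`.

```scala
import org.slf4j.{Logger, LoggerFactory}

trait SimpleLogging {
  // Lazily initialized so the field can be dropped on serialization.
  @transient private var log_ : Logger = null

  // Scala objects get a class name ending in "$"; strip it for a readable logger name.
  protected def logName: String = this.getClass.getName.stripSuffix("$")

  protected def log: Logger = {
    if (log_ == null) {
      log_ = LoggerFactory.getLogger(logName)
    }
    log_
  }

  // By-name message: the string is only built if INFO is enabled.
  protected def logInfo(msg: => String): Unit = if (log.isInfoEnabled) log.info(msg)
}

// Hypothetical usage: this object logs under "MyJob" rather than "MyJob$",
// and an implementor can override logName to pick any logger name.
object MyJob extends SimpleLogging {
  def run(): Unit = logInfo("starting")
}
```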
5 changes: 0 additions & 5 deletions sql/catalyst/pom.xml
@@ -54,11 +54,6 @@
<artifactId>spark-core_${scala.binary.version}</artifactId>
<version>${project.version}</version>
</dependency>
- <dependency>
-   <groupId>com.typesafe</groupId>
-   <artifactId>scalalogging-slf4j_${scala.binary.version}</artifactId>
-   <version>1.0.1</version>
- </dependency>
<dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest_${scala.binary.version}</artifactId>
@@ -109,12 +109,12 @@ class Analyzer(catalog: Catalog, registry: FunctionRegistry, caseSensitive: Bool
object ResolveReferences extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case q: LogicalPlan if q.childrenResolved =>
- logger.trace(s"Attempting to resolve ${q.simpleString}")
+ logTrace(s"Attempting to resolve ${q.simpleString}")
q transformExpressions {
case u @ UnresolvedAttribute(name) =>
// Leave unchanged if resolution fails. Hopefully will be resolved next round.
val result = q.resolve(name).getOrElse(u)
- logger.debug(s"Resolving $u to $result")
+ logDebug(s"Resolving $u to $result")
result
}
}
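The `logger.trace`/`logger.debug` calls being removed above relied on scalalogging's macros to skip building messages that are never emitted. A rough sketch of how the replacement `logTrace`/`logDebug` helpers can stay equally cheap, assuming by-name message parameters as in Spark's `Logging` trait (the object and names below are illustrative only):

```scala
import org.slf4j.{Logger, LoggerFactory}

object LazyMessageSketch {
  private val log: Logger = LoggerFactory.getLogger("LazyMessageSketch")

  // msg is by-name, so the interpolated string is only constructed
  // when the corresponding level is actually enabled.
  def logTrace(msg: => String): Unit = if (log.isTraceEnabled) log.trace(msg)
  def logDebug(msg: => String): Unit = if (log.isDebugEnabled) log.debug(msg)

  def main(args: Array[String]): Unit = {
    val name = "someAttribute"
    logDebug(s"Resolving $name") // no string allocation unless DEBUG is on
  }
}
```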
@@ -75,7 +75,7 @@ trait HiveTypeCoercion {
// Leave the same if the dataTypes match.
case Some(newType) if a.dataType == newType.dataType => a
case Some(newType) =>
- logger.debug(s"Promoting $a to $newType in ${q.simpleString}}")
+ logDebug(s"Promoting $a to $newType in ${q.simpleString}}")
newType
}
}
@@ -154,7 +154,7 @@ trait HiveTypeCoercion {
(Alias(Cast(l, StringType), l.name)(), r)

case (l, r) if l.dataType != r.dataType =>
- logger.debug(s"Resolving mismatched union input ${l.dataType}, ${r.dataType}")
+ logDebug(s"Resolving mismatched union input ${l.dataType}, ${r.dataType}")
findTightestCommonType(l.dataType, r.dataType).map { widestType =>
val newLeft =
if (l.dataType == widestType) l else Alias(Cast(l, widestType), l.name)()
@@ -170,15 +170,15 @@ trait HiveTypeCoercion {

val newLeft =
if (castedLeft.map(_.dataType) != left.output.map(_.dataType)) {
- logger.debug(s"Widening numeric types in union $castedLeft ${left.output}")
+ logDebug(s"Widening numeric types in union $castedLeft ${left.output}")
Project(castedLeft, left)
} else {
left
}

val newRight =
if (castedRight.map(_.dataType) != right.output.map(_.dataType)) {
- logger.debug(s"Widening numeric types in union $castedRight ${right.output}")
+ logDebug(s"Widening numeric types in union $castedRight ${right.output}")
Project(castedRight, right)
} else {
right
@@ -17,7 +17,7 @@

package org.apache.spark.sql.catalyst.expressions

- import org.apache.spark.sql.catalyst.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.errors.attachTree
import org.apache.spark.sql.catalyst.types._
import org.apache.spark.sql.catalyst.trees
@@ -17,7 +17,7 @@

package org.apache.spark.sql.catalyst.expressions.codegen

- import com.typesafe.scalalogging.slf4j.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.types.{StringType, NumericType}

@@ -92,7 +92,7 @@ object GenerateOrdering extends CodeGenerator[Seq[SortOrder], Ordering[Row]] wit
}
new $orderingName()
"""
- logger.debug(s"Generated Ordering: $code")
+ logDebug(s"Generated Ordering: $code")
toolBox.eval(code).asInstanceOf[Ordering[Row]]
}
}
@@ -25,5 +25,4 @@ package object catalyst {
*/
protected[catalyst] object ScalaReflectionLock

- protected[catalyst] type Logging = com.typesafe.scalalogging.slf4j.Logging
}
@@ -17,7 +17,7 @@

package org.apache.spark.sql.catalyst.planning

- import org.apache.spark.sql.catalyst.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.trees.TreeNode

@@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.planning
import scala.annotation.tailrec

import org.apache.spark.sql.catalyst.expressions._
- import org.apache.spark.sql.catalyst.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._

@@ -184,7 +184,7 @@ object ExtractEquiJoinKeys extends Logging with PredicateHelper {

def unapply(plan: LogicalPlan): Option[ReturnType] = plan match {
case join @ Join(left, right, joinType, condition) =>
- logger.debug(s"Considering join on: $condition")
+ logDebug(s"Considering join on: $condition")
// Find equi-join predicates that can be evaluated before the join, and thus can be used
// as join keys.
val (joinPredicates, otherPredicates) =
@@ -202,7 +202,7 @@ object ExtractEquiJoinKeys extends Logging with PredicateHelper {
val rightKeys = joinKeys.map(_._2)

if (joinKeys.nonEmpty) {
- logger.debug(s"leftKeys:${leftKeys} | rightKeys:${rightKeys}")
+ logDebug(s"leftKeys:${leftKeys} | rightKeys:${rightKeys}")
Some((joinType, leftKeys, rightKeys, otherPredicates.reduceOption(And), left, right))
} else {
None
@@ -17,7 +17,7 @@

package org.apache.spark.sql.catalyst.rules

- import org.apache.spark.sql.catalyst.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.trees.TreeNode

abstract class Rule[TreeType <: TreeNode[_]] extends Logging {
@@ -17,7 +17,7 @@

package org.apache.spark.sql.catalyst.rules

- import org.apache.spark.sql.catalyst.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.catalyst.util.sideBySide

@@ -60,7 +60,7 @@ abstract class RuleExecutor[TreeType <: TreeNode[_]] extends Logging {
case (plan, rule) =>
val result = rule(plan)
if (!result.fastEquals(plan)) {
- logger.trace(
+ logTrace(
s"""
|=== Applying Rule ${rule.ruleName} ===
|${sideBySide(plan.treeString, result.treeString).mkString("\n")}
@@ -73,26 +73,26 @@ abstract class RuleExecutor[TreeType <: TreeNode[_]] extends Logging {
if (iteration > batch.strategy.maxIterations) {
// Only log if this is a rule that is supposed to run more than once.
if (iteration != 2) {
- logger.info(s"Max iterations (${iteration - 1}) reached for batch ${batch.name}")
+ logInfo(s"Max iterations (${iteration - 1}) reached for batch ${batch.name}")
}
continue = false
}

if (curPlan.fastEquals(lastPlan)) {
- logger.trace(s"Fixed point reached for batch ${batch.name} after $iteration iterations.")
+ logTrace(s"Fixed point reached for batch ${batch.name} after $iteration iterations.")
continue = false
}
lastPlan = curPlan
}

if (!batchStartPlan.fastEquals(curPlan)) {
- logger.debug(
+ logDebug(
s"""
|=== Result of Batch ${batch.name} ===
|${sideBySide(plan.treeString, curPlan.treeString).mkString("\n")}
""".stripMargin)
} else {
- logger.trace(s"Batch ${batch.name} has no effect.")
+ logTrace(s"Batch ${batch.name} has no effect.")
}
}

@@ -17,6 +17,8 @@

package org.apache.spark.sql.catalyst

+ import org.apache.spark.Logging

/**
* A library for easily manipulating trees of operators. Operators that extend TreeNode are
* granted the following interface:
@@ -31,8 +33,8 @@ package org.apache.spark.sql.catalyst
* <li>debugging support - pretty printing, easy splicing of trees, etc.</li>
* </ul>
*/
- package object trees {
+ package object trees extends Logging {
// Since we want tree nodes to be lightweight, we create one logger for all treenode instances.
- protected val logger =
-   com.typesafe.scalalogging.slf4j.Logger(org.slf4j.LoggerFactory.getLogger("catalyst.trees"))
+ protected override def logName = "catalyst.trees"

}
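As the comment above notes, overriding `logName` keeps tree nodes lightweight: every node logs through the package object's single logger instead of each instance creating its own. A hedged sketch of the idea, reusing the hypothetical `SimpleLogging` trait from the first file of this diff ("catalyst.trees" is the real logger name used above; the object below is illustrative):

```scala
// All calls made through this object end up on one shared slf4j logger
// named "catalyst.trees", regardless of which tree node triggered them.
object TreeLoggingSketch extends SimpleLogging {
  protected override def logName: String = "catalyst.trees"

  def demo(): Unit = logInfo("all tree-node logging funnels through one shared logger")
}
```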
@@ -36,7 +36,7 @@ import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.SparkStrategies
import org.apache.spark.sql.json._
import org.apache.spark.sql.parquet.ParquetRelation
- import org.apache.spark.SparkContext
+ import org.apache.spark.{Logging, SparkContext}

/**
* :: AlphaComponent ::
@@ -19,7 +19,8 @@ package org.apache.spark.sql.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

- import org.apache.spark.sql.{Logging, Row}
+ import org.apache.spark.Logging
+ import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.types.NativeType
import org.apache.spark.sql.columnar.{ColumnBuilder, NativeColumnBuilder}

@@ -101,7 +102,7 @@ private[sql] trait CompressibleColumnBuilder[T <: NativeType]

copyColumnHeader(rawBuffer, compressedBuffer)

- logger.info(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
+ logInfo(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
encoder.compress(rawBuffer, compressedBuffer, columnType)
}
}
@@ -101,7 +101,7 @@ private[sql] case class AddExchange(sqlContext: SQLContext) extends Rule[SparkPl
!operator.requiredChildDistribution.zip(operator.children).map {
case (required, child) =>
val valid = child.outputPartitioning.satisfies(required)
- logger.debug(
+ logDebug(
s"${if (valid) "Valid" else "Invalid"} distribution," +
s"required: $required current: ${child.outputPartitioning}")
valid
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.types._
import org.apache.spark.sql.catalyst.ScalaReflection
- import org.apache.spark.sql.Logging
+ import org.apache.spark.Logging

private[sql] object JsonRDD extends Logging {

2 changes: 0 additions & 2 deletions sql/core/src/main/scala/org/apache/spark/sql/package.scala
@@ -32,8 +32,6 @@ import org.apache.spark.annotation.DeveloperApi
*/
package object sql {

- protected[sql] type Logging = com.typesafe.scalalogging.slf4j.Logging

/**
* :: DeveloperApi ::
*
@@ -22,7 +22,7 @@ import java.sql.Timestamp

import org.scalatest.FunSuite

- import org.apache.spark.sql.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.types._
import org.apache.spark.sql.columnar.ColumnarTestUtils._
import org.apache.spark.sql.execution.SparkSqlSerializer
@@ -166,7 +166,7 @@ class ColumnTypeSuite extends FunSuite with Logging {

buffer.rewind()
seq.foreach { expected =>
- logger.info("buffer = " + buffer + ", expected = " + expected)
+ logInfo("buffer = " + buffer + ", expected = " + expected)
val extracted = columnType.extract(buffer)
assert(
expected === extracted,
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService
import org.apache.hive.service.server.{HiveServer2, ServerOptionsProcessor}

- import org.apache.spark.sql.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._

@@ -40,7 +40,7 @@ private[hive] object HiveThriftServer2 extends Logging {
val optionsProcessor = new ServerOptionsProcessor("HiveThriftServer2")

if (!optionsProcessor.process(args)) {
- logger.warn("Error starting HiveThriftServer2 with given arguments")
+ logWarning("Error starting HiveThriftServer2 with given arguments")
System.exit(-1)
}

@@ -49,12 +49,12 @@ private[hive] object HiveThriftServer2 extends Logging {
// Set all properties specified via command line.
val hiveConf: HiveConf = ss.getConf
hiveConf.getAllProperties.toSeq.sortBy(_._1).foreach { case (k, v) =>
- logger.debug(s"HiveConf var: $k=$v")
+ logDebug(s"HiveConf var: $k=$v")
}

SessionState.start(ss)

- logger.info("Starting SparkContext")
+ logInfo("Starting SparkContext")
SparkSQLEnv.init()
SessionState.start(ss)

@@ -70,10 +70,10 @@ private[hive] object HiveThriftServer2 extends Logging {
val server = new HiveThriftServer2(SparkSQLEnv.hiveContext)
server.init(hiveConf)
server.start()
- logger.info("HiveThriftServer2 started")
+ logInfo("HiveThriftServer2 started")
} catch {
case e: Exception =>
- logger.error("Error starting HiveThriftServer2", e)
+ logError("Error starting HiveThriftServer2", e)
System.exit(-1)
}
}
@@ -37,7 +37,7 @@ import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.shims.ShimLoader
import org.apache.thrift.transport.TSocket

- import org.apache.spark.sql.Logging
+ import org.apache.spark.Logging

private[hive] object SparkSQLCLIDriver {
private var prompt = "spark-sql"
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.metastore.api.{FieldSchema, Schema}
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse

- import org.apache.spark.sql.Logging
+ import org.apache.spark.Logging
import org.apache.spark.sql.hive.{HiveContext, HiveMetastoreTypes}

private[hive] class SparkSQLDriver(val context: HiveContext = SparkSQLEnv.hiveContext)
@@ -40,7 +40,7 @@ private[hive] class SparkSQLDriver(val context: HiveContext = SparkSQLEnv.hiveCo

private def getResultSetSchema(query: context.QueryExecution): Schema = {
val analyzed = query.analyzed
- logger.debug(s"Result Schema: ${analyzed.output}")
+ logDebug(s"Result Schema: ${analyzed.output}")
if (analyzed.output.size == 0) {
new Schema(new FieldSchema("Response code", "string", "") :: Nil, null)
} else {
@@ -61,7 +61,7 @@ private[hive] class SparkSQLDriver(val context: HiveContext = SparkSQLEnv.hiveCo
new CommandProcessorResponse(0)
} catch {
case cause: Throwable =>
- logger.error(s"Failed in [$command]", cause)
+ logError(s"Failed in [$command]", cause)
new CommandProcessorResponse(-3, ExceptionUtils.getFullStackTrace(cause), null)
}
}