4 changes: 4 additions & 0 deletions core/pom.xml
@@ -98,6 +98,10 @@
       <groupId>org.slf4j</groupId>
       <artifactId>jcl-over-slf4j</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.typesafe.scala-logging</groupId>
+      <artifactId>scala-logging-slf4j_${scala.binary.version}</artifactId>
+    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
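
The new entry deliberately omits a <version>; it is inherited from the root pom.xml, which this same diff pins to 2.1.2 (presumably via dependencyManagement; see the pom.xml hunk below). Purely as a sketch, the hypothetical build.sbt equivalent of these coordinates would be:

    // Hypothetical sbt form of the Maven dependency added above
    libraryDependencies += "com.typesafe.scala-logging" %% "scala-logging-slf4j" % "2.1.2"
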
39 changes: 24 additions & 15 deletions core/src/main/scala/org/apache/spark/Logging.scala
@@ -18,8 +18,9 @@
 package org.apache.spark
 
 import org.apache.log4j.{LogManager, PropertyConfigurator}
-import org.slf4j.{Logger, LoggerFactory}
+import org.slf4j.LoggerFactory
 import org.slf4j.impl.StaticLoggerBinder
+import com.typesafe.scalalogging.slf4j.Logger
 
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.util.Utils
@@ -39,61 +40,69 @@ trait Logging {
   // be serialized and used on another machine
   @transient private var log_ : Logger = null
 
+  // Method to get the logger name for this object
+  protected def logName = {
+    var className = this.getClass.getName
+    // Ignore trailing $'s in the class names for Scala objects
+    if (className.endsWith("$")) {
+      className = className.substring(0, className.length - 1)
+    }
+    className
+  }
+
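
The new logName method factors the logger-name computation out of log and makes it overridable, so a class can choose its own logger name without re-implementing log. A minimal sketch of the intended use (MyService is hypothetical, not part of this PR):

    // Hypothetical subclass choosing a custom logger name
    class MyService extends Logging {
      override protected def logName = "org.apache.spark.custom.Name"
      def run(): Unit = logInfo("logged under the custom name")
    }
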
   // Method to get or create the logger for this object
   protected def log: Logger = {
     if (log_ == null) {
       initializeIfNecessary()
-      var className = this.getClass.getName
-      // Ignore trailing $'s in the class names for Scala objects
-      log_ = LoggerFactory.getLogger(className.stripSuffix("$"))
+      log_ = Logger(LoggerFactory.getLogger(logName))
     }
     log_
   }
 
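
log now returns scala-logging's Logger rather than the bare org.slf4j.Logger, built by wrapping the slf4j logger with the Logger(...) factory shown above. A standalone sketch of that wrapping (assuming scala-logging-slf4j 2.1.2 and an slf4j binding on the classpath):

    import com.typesafe.scalalogging.slf4j.Logger
    import org.slf4j.LoggerFactory

    object Example {
      // Built only when the level is enabled; see the macro note below.
      def expensive(): String = { Thread.sleep(10); "details" }
      val log = Logger(LoggerFactory.getLogger("example"))
      def main(args: Array[String]): Unit =
        log.debug(s"state = ${expensive()}")  // argument elided when DEBUG is off
    }
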
   // Log methods that take only a String
   protected def logInfo(msg: => String) {
-    if (log.isInfoEnabled) log.info(msg)
+    log.info(msg)
   }
 
   protected def logDebug(msg: => String) {
-    if (log.isDebugEnabled) log.debug(msg)
+    log.debug(msg)
   }
 
   protected def logTrace(msg: => String) {
-    if (log.isTraceEnabled) log.trace(msg)
+    log.trace(msg)
   }
 
   protected def logWarning(msg: => String) {
-    if (log.isWarnEnabled) log.warn(msg)
+    log.warn(msg)
   }
 
   protected def logError(msg: => String) {
-    if (log.isErrorEnabled) log.error(msg)
+    log.error(msg)
   }
 
   // Log methods that take Throwables (Exceptions/Errors) too
   protected def logInfo(msg: => String, throwable: Throwable) {
-    if (log.isInfoEnabled) log.info(msg, throwable)
+    log.info(msg, throwable)
   }
 
   protected def logDebug(msg: => String, throwable: Throwable) {
-    if (log.isDebugEnabled) log.debug(msg, throwable)
+    log.debug(msg, throwable)
   }
 
   protected def logTrace(msg: => String, throwable: Throwable) {
-    if (log.isTraceEnabled) log.trace(msg, throwable)
+    log.trace(msg, throwable)
   }
 
   protected def logWarning(msg: => String, throwable: Throwable) {
-    if (log.isWarnEnabled) log.warn(msg, throwable)
+    log.warn(msg, throwable)
   }
 
   protected def logError(msg: => String, throwable: Throwable) {
-    if (log.isErrorEnabled) log.error(msg, throwable)
+    log.error(msg, throwable)
   }
 
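
The explicit is*Enabled guards can be dropped because scala-logging's log methods are macros: each call site is rewritten at compile time to test the level before evaluating the message, which is what the guards (combined with the by-name msg: => String parameters) were emulating. Roughly, the expansion has this shape (a sketch, not literal compiler output):

    // What `log.info(msg)` expands to, approximately:
    if (log.underlying.isInfoEnabled) log.underlying.info(msg)
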
   protected def isTraceEnabled(): Boolean = {
-    log.isTraceEnabled
+    log.underlying.isTraceEnabled
   }
 
   private def initializeIfNecessary() {
@@ -18,7 +18,7 @@
 package org.apache.spark.util
 
 import org.apache.commons.lang3.SystemUtils
-import org.slf4j.Logger
+import com.typesafe.scalalogging.slf4j.Logger
 import sun.misc.{Signal, SignalHandler}
 
 /**
4 changes: 4 additions & 0 deletions mllib/pom.xml
@@ -54,6 +54,10 @@
       <artifactId>breeze_${scala.binary.version}</artifactId>
       <version>0.7</version>
       <exclusions>
+        <exclusion>
+          <groupId>com.typesafe</groupId>
+          <artifactId>scalalogging-slf4j_${scala.binary.version}</artifactId>
+        </exclusion>
         <!-- This is included as a compile-scoped dependency by jtransforms, which is
              a dependency of breeze. -->
         <exclusion>
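
Breeze 0.7 transitively depends on the older com.typesafe:scalalogging-slf4j artifact, which ships the same com.typesafe.scalalogging.slf4j package as the com.typesafe.scala-logging:scala-logging-slf4j dependency this PR adds; the exclusion keeps two copies of that package off the classpath. Sketched in hypothetical sbt form (Scala 2.10 suffix assumed):

    // Hypothetical sbt form of the Maven exclusion above
    libraryDependencies += "org.scalanlp" %% "breeze" % "0.7" exclude("com.typesafe", "scalalogging-slf4j_2.10")
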
5 changes: 5 additions & 0 deletions pom.xml
@@ -279,6 +279,11 @@
         <artifactId>slf4j-log4j12</artifactId>
         <version>${slf4j.version}</version>
       </dependency>
+      <dependency>
+        <groupId>com.typesafe.scala-logging</groupId>
+        <artifactId>scala-logging-slf4j_${scala.binary.version}</artifactId>
+        <version>2.1.2</version>
+      </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>jul-to-slf4j</artifactId>
91 changes: 89 additions & 2 deletions project/MimaExcludes.scala
@@ -103,14 +103,101 @@ object MimaExcludes {
             ProblemFilters.exclude[IncompatibleMethTypeProblem](
               "org.apache.spark.mllib.tree.impurity.Variance.calculate")
           ) ++
-          Seq ( // Package-private classes removed in SPARK-2341
+          Seq( // Package-private classes removed in SPARK-2341
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.BinaryLabelParser"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.BinaryLabelParser$"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.LabelParser"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.LabelParser$"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.MulticlassLabelParser"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.MulticlassLabelParser$")
-          )
+          ) ++
+          Seq(
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.bagel.Bagel.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.StreamingContext.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.dstream.DStream.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.mllib.recommendation.ALS.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.mllib.clustering.KMeans.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.mllib.classification.NaiveBayes.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.kafka.KafkaReceiver.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.SparkContext.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.rdd.PairRDDFunctions.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.rdd.OrderedRDDFunctions.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.rdd.SequenceFileRDDFunctions.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.rdd.DoubleRDDFunctions.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.twitter.TwitterReceiver.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.zeromq.ZeroMQReceiver.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.flume.FlumeReceiver.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.rdd.RDD.log"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.SparkConf.log"),
+
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.SparkConf.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.bagel.Bagel.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.StreamingContext.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.dstream.DStream.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.mllib.recommendation.ALS.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.mllib.clustering.KMeans.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.mllib.classification.NaiveBayes.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.twitter.TwitterReceiver.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.zeromq.ZeroMQReceiver.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.SparkContext.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.rdd.RDD.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.rdd.SequenceFileRDDFunctions.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.rdd.OrderedRDDFunctions.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.rdd.PairRDDFunctions.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.kafka.KafkaReceiver.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.rdd.DoubleRDDFunctions.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.flume.FlumeReceiver.org$apache$spark$Logging$$log__="),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.kafka.KafkaReceiver.org$apache$spark$Logging$$log_"),
+            ProblemFilters.exclude[IncompatibleMethTypeProblem]
+              ("org.apache.spark.streaming.twitter.TwitterReceiver.org$apache$spark$Logging$$log_"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.twitter.TwitterReceiver.org$apache$spark$Logging$$log_"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.zeromq.ZeroMQReceiver.org$apache$spark$Logging$$log_"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.bagel.Bagel.org$apache$spark$Logging$$log_"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.bagel.Bagel.org$apache$spark$Logging$$log_"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.flume.FlumeReceiver.org$apache$spark$Logging$$log_"),
+            ProblemFilters.exclude[IncompatibleResultTypeProblem]
+              ("org.apache.spark.streaming.kafka.KafkaReceiver.org$apache$spark$Logging$$log_")
+          )
         case v if v.startsWith("1.0") =>
           Seq(
             MimaBuild.excludeSparkPackage("api.java"),
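
These excludes are all fallout from one source change: the type of Logging.log (and of the backing log_ var) moved from org.slf4j.Logger to com.typesafe.scalalogging.slf4j.Logger. Because Logging is a trait with a private var, scalac materializes a public, name-mangled getter/setter pair in every class that mixes the trait in, so the type change is binary-incompatible in each of those classes, and MiMa flags them one by one. A rough sketch of why (member names approximate):

    import org.slf4j.Logger

    trait Logging {
      @transient private var log_ : Logger = null  // field materialized in each implementor
    }
    // javap on a class mixing in Logging shows, approximately:
    //   public Logger org$apache$spark$Logging$$log_();            // mangled getter
    //   public void   org$apache$spark$Logging$$log__$eq(Logger);  // mangled setter ("log__=")
    // Changing Logger's type changes both signatures, hence the excludes above.
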
5 changes: 0 additions & 5 deletions sql/catalyst/pom.xml
@@ -54,11 +54,6 @@
       <artifactId>spark-core_${scala.binary.version}</artifactId>
       <version>${project.version}</version>
     </dependency>
-    <dependency>
-      <groupId>com.typesafe</groupId>
-      <artifactId>scalalogging-slf4j_${scala.binary.version}</artifactId>
-      <version>1.0.1</version>
-    </dependency>
     <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
@@ -109,12 +109,12 @@ class Analyzer(catalog: Catalog, registry: FunctionRegistry, caseSensitive: Bool
   object ResolveReferences extends Rule[LogicalPlan] {
     def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
       case q: LogicalPlan if q.childrenResolved =>
-        logger.trace(s"Attempting to resolve ${q.simpleString}")
+        log.trace(s"Attempting to resolve ${q.simpleString}")
         q transformExpressions {
           case u @ UnresolvedAttribute(name) =>
             // Leave unchanged if resolution fails. Hopefully will be resolved next round.
             val result = q.resolve(name).getOrElse(u)
-            logger.debug(s"Resolving $u to $result")
+            log.debug(s"Resolving $u to $result")
             result
         }
     }
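
The logger to log renames throughout catalyst fall out of the trait swap: the removed catalyst Logging alias (see the package object change below) pointed at scalalogging's trait, which exposes the member as logger, while org.apache.spark.Logging exposes it as log. Roughly, as a sketch of each trait's relevant member:

    // com.typesafe.scalalogging.slf4j.Logging (old): protected def logger: Logger
    // org.apache.spark.Logging (new):                protected def log: Logger
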
@@ -75,7 +75,7 @@ trait HiveTypeCoercion {
         // Leave the same if the dataTypes match.
         case Some(newType) if a.dataType == newType.dataType => a
         case Some(newType) =>
-          logger.debug(s"Promoting $a to $newType in ${q.simpleString}}")
+          log.debug(s"Promoting $a to $newType in ${q.simpleString}}")
           newType
       }
     }
@@ -154,7 +154,7 @@ trait HiveTypeCoercion {
         (Alias(Cast(l, StringType), l.name)(), r)
 
       case (l, r) if l.dataType != r.dataType =>
-        logger.debug(s"Resolving mismatched union input ${l.dataType}, ${r.dataType}")
+        log.debug(s"Resolving mismatched union input ${l.dataType}, ${r.dataType}")
        findTightestCommonType(l.dataType, r.dataType).map { widestType =>
           val newLeft =
             if (l.dataType == widestType) l else Alias(Cast(l, widestType), l.name)()
@@ -170,15 +170,15 @@
 
       val newLeft =
         if (castedLeft.map(_.dataType) != left.output.map(_.dataType)) {
-          logger.debug(s"Widening numeric types in union $castedLeft ${left.output}")
+          log.debug(s"Widening numeric types in union $castedLeft ${left.output}")
           Project(castedLeft, left)
         } else {
           left
         }
 
       val newRight =
         if (castedRight.map(_.dataType) != right.output.map(_.dataType)) {
-          logger.debug(s"Widening numeric types in union $castedRight ${right.output}")
+          log.debug(s"Widening numeric types in union $castedRight ${right.output}")
           Project(castedRight, right)
         } else {
           right
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.expressions
 
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.errors.attachTree
 import org.apache.spark.sql.catalyst.types._
 import org.apache.spark.sql.catalyst.trees
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.expressions.codegen
 
-import com.typesafe.scalalogging.slf4j.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.types.{StringType, NumericType}
 
@@ -92,7 +92,7 @@ object GenerateOrdering extends CodeGenerator[Seq[SortOrder], Ordering[Row]] wit
       }
       new $orderingName()
     """
-    logger.debug(s"Generated Ordering: $code")
+    log.debug(s"Generated Ordering: $code")
     toolBox.eval(code).asInstanceOf[Ordering[Row]]
   }
 }
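
For context, GenerateOrdering compiles a generated source tree at runtime through a Scala reflection ToolBox, so the debug line above logs the full generated code before it is evaluated. A minimal standalone sketch of that eval pattern (assuming scala-reflect and scala-compiler on the classpath; the Ordering body here is illustrative only):

    import scala.reflect.runtime.currentMirror
    import scala.tools.reflect.ToolBox

    object ToolBoxDemo {
      def main(args: Array[String]): Unit = {
        val toolBox = currentMirror.mkToolBox()
        // Parse source to a tree, then compile and evaluate it.
        val tree = toolBox.parse("new Ordering[Int] { def compare(a: Int, b: Int) = a - b }")
        val ordering = toolBox.eval(tree).asInstanceOf[Ordering[Int]]
        println(ordering.compare(1, 2))  // -1
      }
    }
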
@@ -25,5 +25,4 @@ package object catalyst {
    */
   protected[catalyst] object ScalaReflectionLock
 
-  protected[catalyst] type Logging = com.typesafe.scalalogging.slf4j.Logging
 }
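
With the alias gone, catalyst classes that previously reached com.typesafe.scalalogging.slf4j.Logging through this protected[catalyst] type now mix in Spark's own trait directly, which is what the import swaps throughout this diff do. Before and after at a typical use site:

    // before (resolved via the catalyst package object alias)
    import org.apache.spark.sql.catalyst.Logging
    // after
    import org.apache.spark.Logging
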
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.planning
 
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.catalyst.trees.TreeNode
 
@@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.planning
 import scala.annotation.tailrec
 
 import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.plans.logical._
 
@@ -184,7 +184,7 @@ object ExtractEquiJoinKeys extends Logging with PredicateHelper {
 
   def unapply(plan: LogicalPlan): Option[ReturnType] = plan match {
     case join @ Join(left, right, joinType, condition) =>
-      logger.debug(s"Considering join on: $condition")
+      log.debug(s"Considering join on: $condition")
       // Find equi-join predicates that can be evaluated before the join, and thus can be used
       // as join keys.
       val (joinPredicates, otherPredicates) =
@@ -202,7 +202,7 @@
       val rightKeys = joinKeys.map(_._2)
 
       if (joinKeys.nonEmpty) {
-        logger.debug(s"leftKeys:${leftKeys} | rightKeys:${rightKeys}")
+        log.debug(s"leftKeys:${leftKeys} | rightKeys:${rightKeys}")
         Some((joinType, leftKeys, rightKeys, otherPredicates.reduceOption(And), left, right))
       } else {
         None
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.rules
 
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.trees.TreeNode
 
 abstract class Rule[TreeType <: TreeNode[_]] extends Logging {