Commit c1bc4f4

Luc Bourlier authored and srowen committed
[SPARK-10227] fatal warnings with sbt on Scala 2.11
The bulk of the changes are about the `transient` annotation on class parameters. Often the compiler doesn't generate a field for these parameters, so the transient annotation would be unnecessary. But if the class parameters are used in methods, then fields are created. So it is safer to keep the annotations. The remainder are some potential bugs and deprecated syntax.

Author: Luc Bourlier <[email protected]>

Closes #8433 from skyluc/issue/sbt-2.11.
1 parent 91a577d commit c1bc4f4
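
As a hedged illustration of the point made in the commit message (the classes below are hypothetical, not code from this patch): whether a constructor parameter becomes a field depends on whether it is referenced outside the constructor, and `@transient` only has an effect once a field actually exists.

// Hypothetical sketch, not from the patch.
class OnlyUsedInConstructor(conf: Map[String, String]) extends Serializable {
  // `conf` is only read while the instance is built, so the compiler generates
  // no field for it; a @transient annotation here would have nothing to attach
  // to, which is the "unnecessary annotation" case the commit message describes.
  val size: Int = conf.size
}

class UsedInMethods(@transient private val conf: Map[String, String]) extends Serializable {
  // `conf` is referenced from a method, so the compiler must keep it as a field;
  // keeping it @transient (written as `@transient private val`, as several files
  // in this patch do) keeps the value out of the serialized object, at the cost
  // of it being null after deserialization.
  def lookup(key: String): Option[String] = Option(conf).flatMap(_.get(key))
}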

60 files changed: +158, -151 lines changed

Some content is hidden: large commits collapse part of the diff by default, so only a subset of the changed files appears below.


core/src/main/scala/org/apache/spark/Accumulators.scala

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ import org.apache.spark.util.Utils
  * @tparam T partial data that can be added in
  */
 class Accumulable[R, T] private[spark] (
-    @transient initialValue: R,
+    initialValue: R,
     param: AccumulableParam[R, T],
     val name: Option[String],
     internal: Boolean)

core/src/main/scala/org/apache/spark/Dependency.scala

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ abstract class NarrowDependency[T](_rdd: RDD[T]) extends Dependency[T] {
  */
 @DeveloperApi
 class ShuffleDependency[K, V, C](
-    @transient _rdd: RDD[_ <: Product2[K, V]],
+    @transient private val _rdd: RDD[_ <: Product2[K, V]],
     val partitioner: Partitioner,
     val serializer: Option[Serializer] = None,
     val keyOrdering: Option[Ordering[K]] = None,

core/src/main/scala/org/apache/spark/Partitioner.scala

Lines changed: 2 additions & 2 deletions
@@ -104,8 +104,8 @@ class HashPartitioner(partitions: Int) extends Partitioner {
  * the value of `partitions`.
  */
 class RangePartitioner[K : Ordering : ClassTag, V](
-    @transient partitions: Int,
-    @transient rdd: RDD[_ <: Product2[K, V]],
+    partitions: Int,
+    rdd: RDD[_ <: Product2[K, V]],
     private var ascending: Boolean = true)
   extends Partitioner {

core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ import org.apache.spark.util.SerializableJobConf
  * a filename to write to, etc, exactly like in a Hadoop MapReduce job.
  */
 private[spark]
-class SparkHadoopWriter(@transient jobConf: JobConf)
+class SparkHadoopWriter(jobConf: JobConf)
   extends Logging
   with SparkHadoopMapRedUtil
   with Serializable {

core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala

Lines changed: 2 additions & 2 deletions
@@ -41,7 +41,7 @@ import org.apache.spark.util.{SerializableConfiguration, Utils}
 import scala.util.control.NonFatal
 
 private[spark] class PythonRDD(
-    @transient parent: RDD[_],
+    parent: RDD[_],
     command: Array[Byte],
     envVars: JMap[String, String],
     pythonIncludes: JList[String],
@@ -785,7 +785,7 @@ class BytesToString extends org.apache.spark.api.java.function.Function[Array[By
  * Internal class that acts as an `AccumulatorParam` for Python accumulators. Inside, it
  * collects a list of pickled strings that we pass to Python through a socket.
  */
-private class PythonAccumulatorParam(@transient serverHost: String, serverPort: Int)
+private class PythonAccumulatorParam(@transient private val serverHost: String, serverPort: Int)
   extends AccumulatorParam[JList[Array[Byte]]] {
 
   Utils.checkHost(serverHost, "Expected hostname")

core/src/main/scala/org/apache/spark/input/PortableDataStream.scala

Lines changed: 2 additions & 2 deletions
@@ -131,8 +131,8 @@ private[spark] class StreamInputFormat extends StreamFileInputFormat[PortableDat
  */
 @Experimental
 class PortableDataStream(
-    @transient isplit: CombineFileSplit,
-    @transient context: TaskAttemptContext,
+    isplit: CombineFileSplit,
+    context: TaskAttemptContext,
     index: Integer)
   extends Serializable {

core/src/main/scala/org/apache/spark/network/netty/NettyBlockTransferService.scala

Lines changed: 1 addition & 1 deletion
@@ -137,7 +137,7 @@ class NettyBlockTransferService(conf: SparkConf, securityManager: SecurityManage
       new RpcResponseCallback {
         override def onSuccess(response: Array[Byte]): Unit = {
           logTrace(s"Successfully uploaded block $blockId")
-          result.success()
+          result.success((): Unit)
         }
         override def onFailure(e: Throwable): Unit = {
           logError(s"Error while uploading block $blockId", e)
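
The `result.success((): Unit)` change above falls under the "deprecated syntax" part of the commit message: in Scala 2.11, calling a one-argument method with an empty argument list makes the compiler adapt the call by inserting `()`, and that adaptation is deprecated, hence fatal when warnings are treated as errors. A standalone sketch (not the Spark code, which wraps this in a callback):

import scala.concurrent.Promise

object SuccessUnitSketch {
  val done = Promise[Unit]()

  // done.success()      // 2.11 warns that the argument list is adapted by
  //                     // inserting (); deprecated, so fatal with -Xfatal-warnings
  done.success((): Unit) // pass the Unit value explicitly: no adaptation, no warning
}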

core/src/main/scala/org/apache/spark/rdd/BinaryFileRDD.scala

Lines changed: 3 additions & 3 deletions
@@ -28,18 +28,18 @@ private[spark] class BinaryFileRDD[T](
     inputFormatClass: Class[_ <: StreamFileInputFormat[T]],
     keyClass: Class[String],
     valueClass: Class[T],
-    @transient conf: Configuration,
+    conf: Configuration,
     minPartitions: Int)
   extends NewHadoopRDD[String, T](sc, inputFormatClass, keyClass, valueClass, conf) {
 
   override def getPartitions: Array[Partition] = {
     val inputFormat = inputFormatClass.newInstance
     inputFormat match {
       case configurable: Configurable =>
-        configurable.setConf(conf)
+        configurable.setConf(getConf)
       case _ =>
     }
-    val jobContext = newJobContext(conf, jobId)
+    val jobContext = newJobContext(getConf, jobId)
     inputFormat.setMinPartitions(jobContext, minPartitions)
     val rawSplits = inputFormat.getSplits(jobContext).toArray
     val result = new Array[Partition](rawSplits.size)
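
One plausible reading of the `conf` to `getConf` switch above, in line with the commit message: once `@transient` is dropped from the `conf` parameter, referencing it inside `getPartitions` would make the compiler keep an extra copy of the configuration as a field of the subclass, so the method instead goes through the accessor already defined on the parent RDD. A rough sketch of that pattern with hypothetical classes (not Spark's real API):

class Parent(conf: Map[String, String]) extends Serializable {
  private val storedConf = conf
  def getConf: Map[String, String] = storedConf
}

class Child(name: String, conf: Map[String, String]) extends Parent(conf) {
  // Referencing `conf` here would force the compiler to keep it as a second
  // field on Child; reading through getConf avoids that duplicate field.
  def describe(key: String): String = s"$name -> ${getConf.getOrElse(key, "<unset>")}"
}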

core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,7 @@ private[spark] class BlockRDDPartition(val blockId: BlockId, idx: Int) extends P
 }
 
 private[spark]
-class BlockRDD[T: ClassTag](@transient sc: SparkContext, @transient val blockIds: Array[BlockId])
+class BlockRDD[T: ClassTag](sc: SparkContext, @transient val blockIds: Array[BlockId])
   extends RDD[T](sc, Nil) {
 
   @transient lazy val _locations = BlockManager.blockIdsToHosts(blockIds, SparkEnv.get)
@@ -64,7 +64,7 @@ class BlockRDD[T: ClassTag](@transient sc: SparkContext, @transient val blockIds
    */
   private[spark] def removeBlocks() {
     blockIds.foreach { blockId =>
-      sc.env.blockManager.master.removeBlock(blockId)
+      sparkContext.env.blockManager.master.removeBlock(blockId)
     }
     _isValid = false
   }

core/src/main/scala/org/apache/spark/rdd/CartesianRDD.scala

Lines changed: 2 additions & 2 deletions
@@ -27,8 +27,8 @@ import org.apache.spark.util.Utils
 private[spark]
 class CartesianPartition(
     idx: Int,
-    @transient rdd1: RDD[_],
-    @transient rdd2: RDD[_],
+    @transient private val rdd1: RDD[_],
+    @transient private val rdd2: RDD[_],
     s1Index: Int,
     s2Index: Int
   ) extends Partition {
