diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 5788b70e75a7..64d2032a1272 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -234,10 +234,10 @@ abstract class RDD[T: ClassTag](
    * because DAGs are acyclic, and we only ever hold locks for one path in that DAG, there is no
    * chance of deadlock.
    *
-   * The use of Integer is simply so this is serializable -- executors may reference the shared
-   * fields (though they should never mutate them, that only happens on the driver).
+   * Executors may reference the shared fields (though they should never mutate them,
+   * that only happens on the driver).
    */
-  private val stateLock = new Integer(0)
+  private val stateLock = new Serializable {}
 
   // Our dependencies and partitions will be gotten by calling subclass's methods below, and will
   // be overwritten when we're checkpointed
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MutableProjectionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MutableProjectionSuite.scala
index 63700a1e94a3..c31310bc5402 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MutableProjectionSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MutableProjectionSuite.scala
@@ -60,7 +60,7 @@ class MutableProjectionSuite extends SparkFunSuite with ExpressionEvalHelper {
     val scalaValues = Seq("abc", BigDecimal(10),
       IntervalUtils.stringToInterval(UTF8String.fromString("interval 1 day")),
       Array[Byte](1, 2), Array("123", "456"), Map(1 -> "a", 2 -> "b"), Row(1, "a"),
-      new java.lang.Integer(5))
+      Integer.valueOf(5))
     val inputRow = InternalRow.fromSeq(scalaValues.zip(variableLengthTypes).map {
       case (v, dataType) => CatalystTypeConverters.createToCatalystConverter(dataType)(v)
     })
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
index 49e6e8f2cb8e..cc3995516dcc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
@@ -443,12 +443,12 @@ class UDFSuite extends QueryTest with SharedSparkSession {
   test("SPARK-25044 Verify null input handling for primitive types - with udf(Any, DataType)") {
     val f = udf((x: Int) => x, IntegerType)
     checkAnswer(
-      Seq(new Integer(1), null).toDF("x").select(f($"x")),
+      Seq(Integer.valueOf(1), null).toDF("x").select(f($"x")),
       Row(1) :: Row(0) :: Nil)
 
     val f2 = udf((x: Double) => x, DoubleType)
     checkAnswer(
-      Seq(new java.lang.Double(1.1), null).toDF("x").select(f2($"x")),
+      Seq(java.lang.Double.valueOf(1.1), null).toDF("x").select(f2($"x")),
       Row(1.1) :: Row(0.0) :: Nil)
   }
 
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala
index c160ff206a62..ca1af73b038a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala
@@ -68,6 +68,6 @@ class TestUDF extends GenericUDF {
 
   override def evaluate(arguments: Array[GenericUDF.DeferredObject]): AnyRef = {
     val point = data.getList(arguments(0).get())
-    new java.lang.Double(point.get(0).asInstanceOf[Double])
+    java.lang.Double.valueOf(point.get(0).asInstanceOf[Double])
   }
 }
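
Note: every hunk above applies the same two ideas: the boxed-primitive constructors (new java.lang.Integer(...), new java.lang.Double(...)) are deprecated since JDK 9 in favor of the static valueOf factories, and stateLock keeps its serializability by using an anonymous Serializable subclass instead of a boxed Integer. Below is a minimal, self-contained Scala sketch of both patterns; StateHolder and ValueOfSketch are illustrative names, not part of this patch.

import java.io.{ByteArrayOutputStream, ObjectOutputStream}

// Hypothetical driver-side holder mirroring the shape of the RDD change:
// it is itself Serializable, so the anonymous lock object travels with it.
class StateHolder extends Serializable {
  // A lock object only needs identity; an anonymous Serializable subclass
  // provides that without resorting to the deprecated boxed-Integer trick.
  private val stateLock = new Serializable {}

  def update(): Unit = stateLock.synchronized {
    // mutate shared fields here (driver only, as the RDD comment notes)
  }
}

object ValueOfSketch {
  def main(args: Array[String]): Unit = {
    // Drop-in replacements for the deprecated constructors, as used in the
    // test hunks; Integer.valueOf additionally caches values in [-128, 127].
    val boxedInt: java.lang.Integer = Integer.valueOf(5)
    val boxedDouble: java.lang.Double = java.lang.Double.valueOf(1.1)

    // Sanity check: a holder containing the anonymous lock still serializes.
    val out = new ObjectOutputStream(new ByteArrayOutputStream())
    out.writeObject(new StateHolder)
    println(s"serialized OK; boxed values: $boxedInt, $boxedDouble")
  }
}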