@@ -39,7 +39,7 @@ import org.apache.spark.internal.io._
 import org.apache.spark.internal.Logging
 import org.apache.spark.partial.{BoundedDouble, PartialResult}
 import org.apache.spark.serializer.Serializer
-import org.apache.spark.util.Utils
+import org.apache.spark.util.{SerializableConfiguration, SerializableJobConf, Utils}
 import org.apache.spark.util.collection.CompactBuffer
 import org.apache.spark.util.random.StratifiedSamplingUtils
 
@@ -1051,9 +1051,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    * configured for a Hadoop MapReduce job.
    */
   def saveAsNewAPIHadoopDataset(conf: Configuration): Unit = self.withScope {
-    val config = SparkHadoopWriterConfig.instantiate[K, V](
-      className = classOf[SparkHadoopMapReduceWriterConfig[K, V]].getName,
-      conf = conf)
+    val config = new SparkHadoopMapReduceWriterConfig[K, V](new SerializableConfiguration(conf))
     SparkHadoopWriter.write(
       rdd = self,
       config = config)
@@ -1066,9 +1064,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    * MapReduce job.
    */
   def saveAsHadoopDataset(conf: JobConf): Unit = self.withScope {
-    val config = SparkHadoopWriterConfig.instantiate[K, V](
-      className = classOf[SparkHadoopMapRedWriterConfig[K, V]].getName,
-      conf = conf)
+    val config = new SparkHadoopMapRedWriterConfig[K, V](new SerializableJobConf(conf))
     SparkHadoopWriter.write(
       rdd = self,
       config = config)
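For context, the caller-facing API touched by this diff keeps its signatures: `saveAsNewAPIHadoopDataset` still takes a Hadoop `Configuration` and `saveAsHadoopDataset` still takes a `JobConf`; only the internal writer-config construction changes from reflective instantiation to direct `new` calls. A minimal, self-contained usage sketch of the new-API path is below (the output path, app name, and local master are placeholders, not part of the patch; the old-API `saveAsHadoopDataset` call is analogous with a `JobConf`):

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{IntWritable, Text}
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.output.{FileOutputFormat, TextOutputFormat}
import org.apache.spark.{SparkConf, SparkContext}

object SaveAsNewAPIHadoopDatasetExample {
  def main(args: Array[String]): Unit = {
    // Placeholder app name and master, for a local smoke test only.
    val sc = new SparkContext(new SparkConf().setAppName("save-dataset-example").setMaster("local[*]"))

    // Build a pair RDD of Writables; PairRDDFunctions is available implicitly for RDD[(K, V)].
    val pairs = sc.parallelize(Seq(("a", 1), ("b", 2))).map {
      case (k, v) => (new Text(k), new IntWritable(v))
    }

    // Configure the output through a Hadoop Job, then hand its Configuration
    // to saveAsNewAPIHadoopDataset, the method refactored in this diff.
    val job = Job.getInstance(new Configuration())
    job.setOutputKeyClass(classOf[Text])
    job.setOutputValueClass(classOf[IntWritable])
    job.setOutputFormatClass(classOf[TextOutputFormat[Text, IntWritable]])
    FileOutputFormat.setOutputPath(job, new Path("/tmp/save-dataset-example"))

    pairs.saveAsNewAPIHadoopDataset(job.getConfiguration)

    sc.stop()
  }
}
```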