Commit bedcd10

rename SparkHadoopWriterConfig to HadoopWriteConfigUtil
1 parent 7134e55 commit bedcd10

File tree

3 files changed: 9 additions & 9 deletions

core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriterConfig.scala renamed to core/src/main/scala/org/apache/spark/internal/io/HadoopWriteConfigUtil.scala

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ import org.apache.spark.util.{SerializableConfiguration, SerializableJobConf, Ut
  * 3. Implementations should have a constructor with exactly one argument:
  *    (conf: SerializableConfiguration) or (conf: SerializableJobConf).
  */
-abstract class SparkHadoopWriterConfig[K, V: ClassTag] extends Serializable {
+abstract class HadoopWriteConfigUtil[K, V: ClassTag] extends Serializable {
 
   // --------------------------------------------------------------------------
   // Create JobContext/TaskAttemptContext
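
The doc comment retained above requires each implementation to have a constructor with exactly one argument of type SerializableConfiguration or SerializableJobConf. A minimal self-contained sketch of that constructor shape, using hypothetical stand-in types rather than Spark's internal classes:

import scala.reflect.ClassTag

// Stand-ins for illustration only; not Spark's SerializableConfiguration or
// HadoopWriteConfigUtil, which carry many more members.
class SerializableConfLike(val settings: Map[String, String]) extends Serializable

abstract class WriteConfigUtilLike[K, V: ClassTag] extends Serializable {
  def outputPath: String
}

// Matches the documented constructor contract: exactly one serializable-conf argument.
class ExampleWriteConfigUtil[K, V: ClassTag](conf: SerializableConfLike)
  extends WriteConfigUtilLike[K, V] {
  override def outputPath: String = conf.settings.getOrElse("path", "/tmp/out")
}

object ConstructorShapeDemo {
  def main(args: Array[String]): Unit = {
    val cfg = new ExampleWriteConfigUtil[String, Int](
      new SerializableConfLike(Map("path" -> "/data/out")))
    println(cfg.outputPath)  // prints /data/out
  }
}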

core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriter.scala

Lines changed: 6 additions & 6 deletions
@@ -57,7 +57,7 @@ object SparkHadoopWriter extends Logging {
    */
   def write[K, V: ClassTag](
       rdd: RDD[(K, V)],
-      config: SparkHadoopWriterConfig[K, V]): Unit = {
+      config: HadoopWriteConfigUtil[K, V]): Unit = {
     // Extract context and configuration from RDD.
     val sparkContext = rdd.context
     val stageId = rdd.id
@@ -119,7 +119,7 @@ object SparkHadoopWriter extends Logging {
   /** Write a RDD partition out in a single Spark task. */
   private def executeTask[K, V: ClassTag](
       context: TaskContext,
-      config: SparkHadoopWriterConfig[K, V],
+      config: HadoopWriteConfigUtil[K, V],
       jobTrackerId: String,
       sparkStageId: Int,
       sparkPartitionId: Int,
@@ -175,8 +175,8 @@
  * A helper class that reads JobConf from older mapred API, creates output Format/Committer/Writer.
  */
 private[spark]
-class SparkHadoopMapRedWriterConfig[K, V: ClassTag](conf: SerializableJobConf)
-  extends SparkHadoopWriterConfig[K, V] with Logging {
+class HadoopMapRedWriteConfigUtil[K, V: ClassTag](conf: SerializableJobConf)
+  extends HadoopWriteConfigUtil[K, V] with Logging {
 
   private var outputFormat: Class[_ <: OutputFormat[K, V]] = null
   private var writer: RecordWriter[K, V] = null
@@ -308,8 +308,8 @@ class SparkHadoopMapRedWriterConfig[K, V: ClassTag](conf: SerializableJobConf)
  * Format/Committer/Writer.
  */
 private[spark]
-class SparkHadoopMapReduceWriterConfig[K, V: ClassTag](conf: SerializableConfiguration)
-  extends SparkHadoopWriterConfig[K, V] with Logging {
+class HadoopMapReduceWriteConfigUtil[K, V: ClassTag](conf: SerializableConfiguration)
+  extends HadoopWriteConfigUtil[K, V] with Logging {
 
   private var outputFormat: Class[_ <: NewOutputFormat[K, V]] = null
   private var writer: NewRecordWriter[K, V] = null
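
The two renamed implementations split along the two Hadoop output APIs: HadoopMapRedWriteConfigUtil wraps a SerializableJobConf for the old mapred API, while HadoopMapReduceWriteConfigUtil wraps a SerializableConfiguration for the new mapreduce API. The "New"-prefixed types in the hunks above (NewOutputFormat, NewRecordWriter) are import aliases that let one file refer to both APIs; a sketch of that aliasing, with the import statement itself being an assumption based on common Spark style rather than a copy of the file's actual imports:

// Old mapred API types keep their plain names.
import org.apache.hadoop.mapred.{OutputFormat, RecordWriter}
// New mapreduce API types are renamed on import to avoid clashes,
// which is why the fields above can be typed as NewOutputFormat/NewRecordWriter.
import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat, RecordWriter => NewRecordWriter}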

core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala

Lines changed: 2 additions & 2 deletions
@@ -1051,7 +1051,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    * configured for a Hadoop MapReduce job.
    */
   def saveAsNewAPIHadoopDataset(conf: Configuration): Unit = self.withScope {
-    val config = new SparkHadoopMapReduceWriterConfig[K, V](new SerializableConfiguration(conf))
+    val config = new HadoopMapReduceWriteConfigUtil[K, V](new SerializableConfiguration(conf))
     SparkHadoopWriter.write(
       rdd = self,
       config = config)
@@ -1064,7 +1064,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    * MapReduce job.
    */
   def saveAsHadoopDataset(conf: JobConf): Unit = self.withScope {
-    val config = new SparkHadoopMapRedWriterConfig[K, V](new SerializableJobConf(conf))
+    val config = new HadoopMapRedWriteConfigUtil[K, V](new SerializableJobConf(conf))
     SparkHadoopWriter.write(
       rdd = self,
       config = config)
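
The public signatures of saveAsNewAPIHadoopDataset and saveAsHadoopDataset are untouched by this rename; only the internal config class they construct changes. A rough caller-side sketch for the new-API path (app name, master, and output path below are illustrative, not taken from the patch):

import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{IntWritable, Text}
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.output.{FileOutputFormat, TextOutputFormat}
import org.apache.spark.{SparkConf, SparkContext}

object SaveAsNewAPIHadoopDatasetExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("save-demo").setMaster("local[*]"))

    // Configure the output side of a MapReduce job; saveAsNewAPIHadoopDataset
    // then internally builds the (renamed) HadoopMapReduceWriteConfigUtil.
    val job = Job.getInstance(sc.hadoopConfiguration)
    job.setOutputKeyClass(classOf[Text])
    job.setOutputValueClass(classOf[IntWritable])
    job.setOutputFormatClass(classOf[TextOutputFormat[Text, IntWritable]])
    FileOutputFormat.setOutputPath(job, new Path("/tmp/save-demo-output"))

    sc.parallelize(Seq("a" -> 1, "b" -> 2))
      .map { case (k, v) => (new Text(k), new IntWritable(v)) }  // convert to Writables on executors
      .saveAsNewAPIHadoopDataset(job.getConfiguration)

    sc.stop()
  }
}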
