/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark

import java.util.Date

import org.apache.hadoop.conf.{Configurable, Configuration}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.HadoopMapReduceCommitProtocol
import org.apache.spark.util.SerializableConfiguration

/**
 * Internal helper class that saves an RDD using a Hadoop OutputFormat
 * (from the newer mapreduce API, not the old mapred API).
 *
 * Saves the RDD using a Hadoop Configuration, which should contain an output key class, an
 * output value class, a filename to write to, etc., exactly like in a Hadoop MapReduce job.
 *
 * Uses a [[HadoopMapReduceCommitProtocol]] to handle the output commit, which, unlike Hadoop's
 * OutputCommitter, is serializable.
 */
private[spark]
class SparkNewHadoopWriter(
    jobConf: Configuration,
    committer: HadoopMapReduceCommitProtocol) extends Logging with Serializable {

  private val now = new Date()
  private val conf = new SerializableConfiguration(jobConf)

  private val jobtrackerID = SparkHadoopWriter.createJobTrackerID(now)
  private var jobId = 0
  private var splitId = 0
  private var attemptId = 0

  // Hadoop contexts and record writers are not serializable, so they are
  // created lazily on executors after this writer is deserialized.
  @transient private var writer: RecordWriter[AnyRef, AnyRef] = null
  @transient private var jobContext: JobContext = null
  @transient private var taskContext: TaskAttemptContext = null

  def setupJob(): Unit = {
    // Let the committer set up the job
    committer.setupJob(getJobContext)
  }

  def setupTask(context: TaskContext): Unit = {
    // Derive the job/split/attempt IDs from the Spark task context
    jobId = context.stageId
    splitId = context.partitionId
    attemptId = (context.taskAttemptId % Int.MaxValue).toInt
    // Let the committer set up the task
    committer.setupTask(getTaskContext(context))
  }

  def write(context: TaskContext, key: AnyRef, value: AnyRef): Unit = {
    getWriter(context).write(key, value)
  }

  def abortTask(context: TaskContext): Unit = {
    // Close the writer before aborting
    getWriter(context).close(getTaskContext(context))
    // Let the committer abort the task
    committer.abortTask(getTaskContext(context))
  }

  def commitTask(context: TaskContext): Unit = {
    // Close the writer to flush any buffered records
    getWriter(context).close(getTaskContext(context))
    // Let the committer commit the task
    committer.commitTask(getTaskContext(context))
  }

  def abortJob(): Unit = {
    committer.abortJob(getJobContext)
  }

  def commitJob(): Unit = {
    committer.commitJob(getJobContext, Seq.empty)
  }

  // ********* Private Functions *********

  /*
   * Generate the jobContext. Since jobContext is transient, it may be null after this
   * writer is deserialized on an executor.
   */
  private def getJobContext(): JobContext = {
    if (jobContext == null) {
      val jobAttemptId = new TaskAttemptID(jobtrackerID, jobId, TaskType.MAP, 0, 0)
      jobContext = new TaskAttemptContextImpl(conf.value, jobAttemptId)
    }
    jobContext
  }

  /*
   * Generate the taskContext. Since taskContext is transient, it may be null after this
   * writer is deserialized on an executor.
   */
  private def getTaskContext(context: TaskContext): TaskAttemptContext = {
    if (taskContext == null) {
      // Use a local name that does not shadow the `attemptId` field
      val taskAttemptId = new TaskAttemptID(jobtrackerID, jobId, TaskType.REDUCE, splitId,
        context.attemptNumber)
      taskContext = new TaskAttemptContextImpl(conf.value, taskAttemptId)
    }
    taskContext
  }

  /*
   * Generate the writer. Since the writer is transient, it may be null after this
   * writer is deserialized on an executor.
   */
  private def getWriter(context: TaskContext): RecordWriter[AnyRef, AnyRef] = {
    if (writer == null) {
      val format = getJobContext.getOutputFormatClass.newInstance
      // Some output formats (e.g. HBase's TableOutputFormat) require the Hadoop
      // configuration to be set explicitly before the format can be used.
      format match {
        case c: Configurable => c.setConf(conf.value)
        case _ =>
      }
      writer = format.getRecordWriter(getTaskContext(context))
        .asInstanceOf[RecordWriter[AnyRef, AnyRef]]
    }
    writer
  }
}
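
For context, a minimal sketch of how a caller might drive this writer from an RDD action: setupJob on the driver, setupTask/write/commitTask (with abortTask on failure) inside each task, and commitJob or abortJob back on the driver. The names `sc`, `rdd`, `hadoopConf`, and `committer` below are illustrative stand-ins, not part of this patch:

// Hypothetical caller (not part of this patch): assumes `sc` is a SparkContext,
// `rdd` an RDD[(AnyRef, AnyRef)], `hadoopConf` a Configuration with the output
// format/key/value classes set, and `committer` a HadoopMapReduceCommitProtocol.
val writer = new SparkNewHadoopWriter(hadoopConf, committer)
writer.setupJob()                                   // driver side
try {
  sc.runJob(rdd, (context: TaskContext, iter: Iterator[(AnyRef, AnyRef)]) => {
    writer.setupTask(context)                       // executor side, per task
    try {
      iter.foreach { case (k, v) => writer.write(context, k, v) }
      writer.commitTask(context)
    } catch {
      case t: Throwable =>
        writer.abortTask(context)                   // roll back this attempt
        throw t
    }
  })
  writer.commitJob()                                // finalize output on the driver
} catch {
  case t: Throwable =>
    writer.abortJob()
    throw t
}

Because the writer holds only a SerializableConfiguration and the serializable commit protocol, it can be captured by the task closure; the Hadoop contexts and record writer are rebuilt lazily on each executor.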