2 files changed, +4 −3 lines changed

core/src/main/scala/org/apache/spark

```diff
@@ -48,9 +48,10 @@ private[spark] class WholeTextFileInputFormat extends CombineFileInputFormat[Str
   }
 
   /**
-   * Allow minPartitions set by end-user in order to keep compatibility with old Hadoop API.
+   * Allow minPartitions set by end-user in order to keep compatibility with old Hadoop API,
+   * which is set through setMaxSplitSize.
    */
-  def setMaxSplitSize(context: JobContext, minPartitions: Int) {
+  def setMinPartitions(context: JobContext, minPartitions: Int) {
     val files = listStatus(context)
     val totalLen = files.map { file =>
       if (file.isDir) 0L else file.getLen
```
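The rename is more than cosmetic: the second argument is a desired minimum partition count, not a byte size, so the name setMaxSplitSize misrepresented it. Internally the count still has to be translated into a size cap, because the old Hadoop CombineFileInputFormat API only exposes setMaxSplitSize, which is exactly the compatibility point the updated doc comment makes. Below is a minimal standalone sketch of that translation; the object name, the ceiling division, and the zero guard are illustrative assumptions, not lines from this PR.

```scala
// Standalone sketch of the minPartitions -> maxSplitSize translation.
// All names and numbers here are illustrative, not taken from the PR.
object MinPartitionsSketch {
  // Cap each combined split at ceil(totalLen / minPartitions) bytes so a
  // CombineFileInputFormat would emit at least minPartitions splits.
  def maxSplitSizeFor(fileLengths: Seq[Long], minPartitions: Int): Long = {
    val totalLen = fileLengths.sum
    math.ceil(totalLen.toDouble / math.max(minPartitions, 1)).toLong
  }

  def main(args: Array[String]): Unit = {
    // Three files totalling 300 bytes with at least 4 partitions requested:
    // each combined split may hold at most 75 bytes.
    println(maxSplitSizeFor(Seq(100L, 120L, 80L), 4)) // 75
  }
}
```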

```diff
@@ -182,7 +182,7 @@ private[spark] class WholeTextFileRDD(
       case _ =>
     }
     val jobContext = newJobContext(conf, jobId)
-    inputFormat.setMaxSplitSize(jobContext, minPartitions)
+    inputFormat.setMinPartitions(jobContext, minPartitions)
     val rawSplits = inputFormat.getSplits(jobContext).toArray
     val result = new Array[Partition](rawSplits.size)
     for (i <- 0 until rawSplits.size) {
```
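For context, the hint originates with the end user and flows through WholeTextFileRDD to the renamed method. A hedged usage sketch, assuming a live SparkContext named sc and an illustrative input path:

```scala
// Assumes an existing SparkContext `sc`; the path is illustrative.
// wholeTextFiles returns an RDD of (filename, content) pairs, and the
// minPartitions hint flows through WholeTextFileRDD to setMinPartitions.
val rdd = sc.wholeTextFiles("hdfs:///data/many-small-files", 8)
println(rdd.partitions.length) // expect at least 8, input size permitting
```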