Commit f36edd5

Code review feedback
1 parent d1c0494 commit f36edd5

2 files changed: +3, -2 lines changed

core/src/main/scala/org/apache/spark/shuffle/IndexShuffleBlockManager.scala

Lines changed: 2 additions & 1 deletion
@@ -118,6 +118,7 @@ class IndexShuffleBlockManager(conf: SparkConf) extends ShuffleBlockResolver {
 private[spark] object IndexShuffleBlockManager {
   // No-op reduce ID used in interactions with disk store and BlockObjectWriter.
   // The disk store currently expects puts to relate to a (map, reduce) pair, but in the sort
-  // shuffle outputs from a map for several
+  // shuffle outputs for several reduces are glommed into a single file.
+  // TODO: Avoid this entirely by having the DiskBlockObjectWriter not require a BlockId.
   val NOOP_REDUCE_ID = 0
 }
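
For context on the no-op reduce ID: sort-based shuffle writes the output for every reduce partition of one map task into a single file, yet the disk store and BlockObjectWriter are addressed by a (map, reduce) pair, so a fixed filler reduce ID is passed. Below is a minimal, self-contained sketch of that idea; the names (NoopReduceIdSketch, WriterKey, keyForGlommedOutput) are hypothetical illustrations, not Spark's API.

// Hypothetical stand-ins for the interaction the comment above describes:
// writes are keyed by a (map, reduce) pair, but all reduce partitions of one
// map task land in a single file, so a fixed no-op reduce ID (0, mirroring
// NOOP_REDUCE_ID) fills the reduce slot of the key.
object NoopReduceIdSketch {
  val NoopReduceId = 0  // placeholder reduce ID, analogous to NOOP_REDUCE_ID

  final case class WriterKey(mapId: Int, reduceId: Int)  // hypothetical (map, reduce) key

  def keyForGlommedOutput(mapId: Int): WriterKey =
    WriterKey(mapId, NoopReduceId)  // every reduce for this map shares one file, hence one key

  def main(args: Array[String]): Unit =
    println(keyForGlommedOutput(mapId = 7))  // prints: WriterKey(7,0)
}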

core/src/main/scala/org/apache/spark/storage/BlockManager.scala

Lines changed: 1 addition & 1 deletion
@@ -442,7 +442,7 @@ private[spark] class BlockManager(
       val shuffleBlockManager = shuffleManager.shuffleBlockResolver
       // TODO: This should gracefully handle case where local block is not available. Currently
       // downstream code will throw an exception.
-      Some(shuffleBlockManager.getBlockData(blockId.asInstanceOf[ShuffleBlockId]).nioByteBuffer())
+      Option(shuffleBlockManager.getBlockData(blockId.asInstanceOf[ShuffleBlockId]).nioByteBuffer())
     } else {
       doGetLocal(blockId, asBlockResult = false).asInstanceOf[Option[ByteBuffer]]
     }
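
A note on the Some to Option change above: in the Scala standard library, Option(x) evaluates to None when x is null, whereas Some(x) wraps the null, so the new code yields None rather than Some(null) if nioByteBuffer() ever returns null. A minimal, self-contained illustration (plain Scala, no Spark dependencies):

object SomeVsOption {
  def main(args: Array[String]): Unit = {
    val maybeNull: String = null   // stand-in for a value that might be null
    println(Some(maybeNull))       // prints: Some(null) -- the null leaks into the Option
    println(Option(maybeNull))     // prints: None       -- null is normalized to None
    println(Option("bytes"))       // prints: Some(bytes)
  }
}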
