Commit 51e1b2e

Rename downgradeToReadLock -> keepReadLock
1 parent: 5a1fb12

1 file changed: 7 additions, 10 deletions

core/src/main/scala/org/apache/spark/storage/BlockManager.scala

Lines changed: 7 additions & 10 deletions
@@ -658,7 +658,7 @@ private[spark] class BlockManager(
       level: StorageLevel,
       makeIterator: () => Iterator[Any]): Either[BlockResult, Iterator[Any]] = {
     // Initially we hold no locks on this block.
-    doPut(blockId, IteratorValues(makeIterator), level, downgradeToReadLock = true) match {
+    doPut(blockId, IteratorValues(makeIterator), level, keepReadLock = true) match {
       case None =>
         // doPut() didn't hand work back to us, so the block already existed or was successfully
         // stored. Therefore, we now hold a read lock on the block.
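
For context, a minimal standalone sketch (not Spark code; every name below is illustrative) of the caller-side contract this hunk relies on: when the put path is invoked with keepReadLock = true and returns None, the caller is left holding a read lock on the block and must release it once it has finished reading.

object KeepReadLockCallerSketch {
  sealed trait LockState
  case object Unlocked extends LockState
  case object ReadLocked extends LockState

  private var state: LockState = Unlocked

  // Hypothetical stand-in for a put with keepReadLock = true: on success it
  // leaves the block read-locked and returns None, mirroring the diff's contract.
  def putKeepingReadLock(store: () => String): Option[String] = {
    store()              // pretend to store the block's data
    state = ReadLocked   // keep a read lock instead of releasing everything
    None                 // None => stored (or already present); read lock is held
  }

  def releaseLock(): Unit = { state = Unlocked }

  def main(args: Array[String]): Unit = {
    putKeepingReadLock(() => "block-data") match {
      case None =>
        println(s"reading under $state")  // safe to read: we hold the read lock
        releaseLock()                     // caller releases it when done
      case Some(data) =>
        println(s"put failed, data handed back: $data; no lock held")
    }
    println(s"final lock state: $state")
  }
}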
@@ -729,12 +729,9 @@ private[spark] class BlockManager(
    * @param effectiveStorageLevel the level according to which the block will actually be handled.
    *                              This allows the caller to specify an alternate behavior of doPut
    *                              while preserving the original level specified by the user.
-   * @param downgradeToReadLock if true, this method will downgrade its write lock on the block
-   *                            to a read lock before returning; this will happen even if the block
-   *                            already exists, so when this is true the caller will always hold
-   *                            a read lock on the block after this method returns without throwing
-   *                            an exception. If false (default), this method will release the write
-   *                            lock before returning.
+   * @param keepReadLock if true, this method will hold the read lock when it returns (even if the
+   *                     block already exists). If false, this method will hold no locks when it
+   *                     returns.
    * @return `Some(PutResult)` if the block did not exist and could not be successfully cached,
    *         or None if the block already existed or was successfully stored (fully consuming
    *         the input data / input iterator).
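
As a quick illustration (hypothetical names, not Spark's API), the new @param text boils down to the following contract for the lock held when the method returns:

object KeepReadLockContractSketch {
  sealed trait HeldLock
  case object NoLock extends HeldLock
  case object ReadLock extends HeldLock

  // keepReadLock = true  => a read lock is held on return, even if the block already existed.
  // keepReadLock = false => no lock is held on return.
  def lockHeldOnReturn(keepReadLock: Boolean): HeldLock =
    if (keepReadLock) ReadLock else NoLock

  def main(args: Array[String]): Unit = {
    assert(lockHeldOnReturn(keepReadLock = true) == ReadLock)
    assert(lockHeldOnReturn(keepReadLock = false) == NoLock)
    println("keepReadLock contract sketch holds")
  }
}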
@@ -745,7 +742,7 @@ private[spark] class BlockManager(
       level: StorageLevel,
       tellMaster: Boolean = true,
       effectiveStorageLevel: Option[StorageLevel] = None,
-      downgradeToReadLock: Boolean = false): Option[PutResult] = {
+      keepReadLock: Boolean = false): Option[PutResult] = {

     require(blockId != null, "BlockId is null")
     require(level != null && level.isValid, "StorageLevel is null or invalid")
@@ -762,7 +759,7 @@ private[spark] class BlockManager(
         newInfo
       } else {
         logWarning(s"Block $blockId already exists on this machine; not re-adding it")
-        if (!downgradeToReadLock) {
+        if (!keepReadLock) {
           // lockNewBlockForWriting returned a read lock on the existing block, so we must free it:
           releaseLock(blockId)
         }
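
This hunk covers the path where the block already exists. A simplified sketch (hypothetical helpers, not the real BlockManager code) of the idea: the read lock obtained on the existing block is freed only when keepReadLock is false, and kept otherwise.

object ExistingBlockPathSketch {
  sealed trait Lock
  case object ReadLock extends Lock
  case object NoLock extends Lock

  // Stand-in for the "block already exists" case, which yields a read lock on the block.
  def lockExistingBlockForReading(): Lock = ReadLock

  // Mirrors the diff: keep the read lock if keepReadLock is true, otherwise release it.
  def handleExistingBlock(keepReadLock: Boolean): Lock = {
    val acquired = lockExistingBlockForReading()
    if (!keepReadLock) NoLock // releaseLock(blockId) in the real code
    else acquired
  }

  def main(args: Array[String]): Unit = {
    println(handleExistingBlock(keepReadLock = true))   // ReadLock
    println(handleExistingBlock(keepReadLock = false))  // NoLock
  }
}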
@@ -856,7 +853,7 @@ private[spark] class BlockManager(
       }
     } finally {
       if (blockWasSuccessfullyStored) {
-        if (downgradeToReadLock) {
+        if (keepReadLock) {
           blockInfoManager.downgradeLock(blockId)
         } else {
           blockInfoManager.unlock(blockId)
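
To make the finally-block decision above concrete, here is a self-contained sketch (ToyBlockInfoManager is hypothetical, not the real BlockInfoManager): after a successful store, the write lock held during the put is either downgraded to a read lock (keepReadLock = true) or released entirely.

object FinallyBlockSketch {
  sealed trait Lock
  case object WriteLock extends Lock
  case object ReadLock extends Lock
  case object NoLock extends Lock

  // Toy stand-in for a per-block lock holder; the put path holds the write lock while storing.
  final class ToyBlockInfoManager {
    private var held: Lock = WriteLock
    def downgradeLock(): Unit = { held = ReadLock } // write lock -> read lock
    def unlock(): Unit = { held = NoLock }          // drop the lock entirely
    def heldLock: Lock = held
  }

  // Mirrors the shape of the finally block; error handling for the
  // unsuccessful-store case is omitted in this sketch.
  def finishPut(blockWasSuccessfullyStored: Boolean, keepReadLock: Boolean): Lock = {
    val blockInfoManager = new ToyBlockInfoManager
    if (blockWasSuccessfullyStored) {
      if (keepReadLock) blockInfoManager.downgradeLock() else blockInfoManager.unlock()
    }
    blockInfoManager.heldLock
  }

  def main(args: Array[String]): Unit = {
    println(finishPut(blockWasSuccessfullyStored = true, keepReadLock = true))   // ReadLock
    println(finishPut(blockWasSuccessfullyStored = true, keepReadLock = false))  // NoLock
  }
}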
