Skip to content

Commit f0ab1e6

Browse files
authored
Revert "HDFS-17496. DataNode supports more fine-grained dataset lock based on…" (apache#7279)
This reverts commit 94d6a77.
1 parent d44ac28 commit f0ab1e6

File tree

10 files changed

+45
-248
lines changed

10 files changed

+45
-248
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1744,10 +1744,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
17441744
public static final boolean
17451745
DFS_DATANODE_LOCKMANAGER_TRACE_DEFAULT = false;
17461746

1747-
public static final String DFS_DATANODE_DATASET_SUBLOCK_COUNT_KEY =
1748-
"dfs.datanode.dataset.sublock.count";
1749-
public static final long DFS_DATANODE_DATASET_SUBLOCK_COUNT_DEFAULT = 1000L;
1750-
17511747
// dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
17521748
@Deprecated
17531749
public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,7 @@ public interface DataNodeLockManager<T extends AutoCloseDataSetLock> {
2929
*/
3030
enum LockLevel {
3131
BLOCK_POOl,
32-
VOLUME,
33-
DIR
32+
VOLUME
3433
}
3534

3635
/**

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java

Lines changed: 2 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -96,13 +96,6 @@ private String generateLockName(LockLevel level, String... resources) {
9696
+ resources[0] + "volume lock :" + resources[1]);
9797
}
9898
return resources[0] + resources[1];
99-
} else if (resources.length == 3 && level == LockLevel.DIR) {
100-
if (resources[0] == null || resources[1] == null || resources[2] == null) {
101-
throw new IllegalArgumentException("acquire a null dataset lock : "
102-
+ resources[0] + ",volume lock :" + resources[1]
103-
+ ",subdir lock :" + resources[2]);
104-
}
105-
return resources[0] + resources[1] + resources[2];
10699
} else {
107100
throw new IllegalArgumentException("lock level do not match resource");
108101
}
@@ -163,7 +156,7 @@ public DataSetLockManager(Configuration conf, DataNode dn) {
163156
public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
164157
if (level == LockLevel.BLOCK_POOl) {
165158
return getReadLock(level, resources[0]);
166-
} else if (level == LockLevel.VOLUME){
159+
} else {
167160
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
168161
AutoCloseDataSetLock volLock = getReadLock(level, resources);
169162
volLock.setParentLock(bpLock);
@@ -172,25 +165,14 @@ public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
172165
resources[0]);
173166
}
174167
return volLock;
175-
} else {
176-
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
177-
AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
178-
volLock.setParentLock(bpLock);
179-
AutoCloseDataSetLock dirLock = getReadLock(level, resources);
180-
dirLock.setParentLock(volLock);
181-
if (openLockTrace) {
182-
LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
183-
resources[0] + resources[1]);
184-
}
185-
return dirLock;
186168
}
187169
}
188170

189171
@Override
190172
public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) {
191173
if (level == LockLevel.BLOCK_POOl) {
192174
return getWriteLock(level, resources[0]);
193-
} else if (level == LockLevel.VOLUME) {
175+
} else {
194176
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
195177
AutoCloseDataSetLock volLock = getWriteLock(level, resources);
196178
volLock.setParentLock(bpLock);
@@ -199,17 +181,6 @@ public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) {
199181
resources[0]);
200182
}
201183
return volLock;
202-
} else {
203-
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
204-
AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
205-
volLock.setParentLock(bpLock);
206-
AutoCloseDataSetLock dirLock = getWriteLock(level, resources);
207-
dirLock.setParentLock(volLock);
208-
if (openLockTrace) {
209-
LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
210-
resources[0] + resources[1]);
211-
}
212-
return dirLock;
213184
}
214185
}
215186

@@ -264,13 +235,8 @@ public void addLock(LockLevel level, String... resources) {
264235
String lockName = generateLockName(level, resources);
265236
if (level == LockLevel.BLOCK_POOl) {
266237
lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
267-
} else if (level == LockLevel.VOLUME) {
268-
lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
269-
lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
270238
} else {
271239
lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
272-
lockMap.addLock(generateLockName(LockLevel.VOLUME, resources[0], resources[1]),
273-
new ReentrantReadWriteLock(isFair));
274240
lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
275241
}
276242
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetSubLockStrategy.java

Lines changed: 0 additions & 36 deletions
This file was deleted.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ModDataSetSubLockStrategy.java

Lines changed: 0 additions & 53 deletions
This file was deleted.

0 commit comments

Comments (0)