Skip to content

Commit d3fed8e

Browse files
author
Benoy Antony
committed
HDFS-7390. Provide JMX metrics per storage type. (Benoy Antony)
1 parent fde20ff commit d3fed8e

File tree

6 files changed

+412
-2
lines changed

6 files changed

+412
-2
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,8 @@
3939
import java.util.concurrent.ThreadLocalRandom;
4040
import java.util.concurrent.atomic.AtomicLong;
4141

42+
import javax.management.ObjectName;
43+
4244
import org.apache.hadoop.HadoopIllegalArgumentException;
4345
import org.apache.hadoop.classification.InterfaceAudience;
4446
import org.apache.hadoop.conf.Configuration;
@@ -85,6 +87,7 @@
8587
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
8688
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
8789
import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
90+
import org.apache.hadoop.metrics2.util.MBeans;
8891
import org.apache.hadoop.net.Node;
8992
import org.apache.hadoop.security.UserGroupInformation;
9093
import org.apache.hadoop.util.Daemon;
@@ -94,14 +97,15 @@
9497
import com.google.common.annotations.VisibleForTesting;
9598
import com.google.common.base.Preconditions;
9699
import com.google.common.collect.Sets;
100+
97101
import org.slf4j.Logger;
98102
import org.slf4j.LoggerFactory;
99103

100104
/**
101105
* Keeps information related to the blocks stored in the Hadoop cluster.
102106
*/
103107
@InterfaceAudience.Private
104-
public class BlockManager {
108+
public class BlockManager implements BlockStatsMXBean {
105109

106110
public static final Logger LOG = LoggerFactory.getLogger(BlockManager.class);
107111
public static final Logger blockLog = NameNode.blockStateChangeLog;
@@ -129,6 +133,7 @@ public class BlockManager {
129133
private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
130134
private final long startupDelayBlockDeletionInMs;
131135
private final BlockReportLeaseManager blockReportLeaseManager;
136+
private ObjectName mxBeanName;
132137

133138
/** Used by metrics */
134139
public long getPendingReplicationBlocksCount() {
@@ -468,6 +473,7 @@ public void activate(Configuration conf) {
468473
pendingReplications.start();
469474
datanodeManager.activate(conf);
470475
this.replicationThread.start();
476+
mxBeanName = MBeans.register("NameNode", "BlockStats", this);
471477
}
472478

473479
public void close() {
@@ -3944,6 +3950,8 @@ enum MisReplicationResult {
39443950
/**
 * Shut down block management: stop the replication initializer, close the
 * block map, and unregister the "NameNode:BlockStats" MXBean that was
 * registered in activate().
 */
public void shutdown() {
  stopReplicationInitializer();
  blocksMap.close();
  MBeans.unregister(mxBeanName);
  mxBeanName = null; // drop the handle; the bean is no longer registered
}
39483956

39493957
public void clear() {
@@ -3954,4 +3962,9 @@ public void clear() {
39543962
/** @return the {@link BlockReportLeaseManager} held by this BlockManager. */
public BlockReportLeaseManager getBlockReportLeaseManager() {
  return blockReportLeaseManager;
}
3965+
3966+
/**
 * {@inheritDoc}
 *
 * Delegates to the datanode statistics kept by the DatanodeManager; this
 * backs the "NameNode:BlockStats" MXBean registered in activate().
 */
@Override // BlockStatsMXBean
public Map<StorageType, StorageTypeStats> getStorageTypeStats() {
  return datanodeManager.getDatanodeStatistics().getStorageTypeStats();
}
39573970
}
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hdfs.server.blockmanagement;
19+
20+
import java.util.Map;
21+
22+
import org.apache.hadoop.fs.StorageType;
23+
24+
/**
 * JMX interface exposing statistics related to block management,
 * implemented by {@code BlockManager} and registered under
 * "NameNode:BlockStats".
 */
public interface BlockStatsMXBean {

  /**
   * Per-storage-type statistics.
   *
   * @return a map from each {@link StorageType} to its aggregate
   *         {@link StorageTypeStats}
   */
  Map<StorageType, StorageTypeStats> getStorageTypeStats();
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@
1717
*/
1818
package org.apache.hadoop.hdfs.server.blockmanagement;
1919

20+
import java.util.Map;
21+
22+
import org.apache.hadoop.fs.StorageType;
2023
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
2124

2225
/** Datanode statistics */
@@ -71,4 +74,7 @@ public interface DatanodeStatistics {
7174

7275
/** @return the expired heartbeats */
7376
public int getExpiredHeartbeats();
77+
78+
/** @return storage statistics, one {@link StorageTypeStats} per storage type. */
Map<StorageType, StorageTypeStats> getStorageTypeStats();
7480
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

Lines changed: 95 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,15 @@
1818
package org.apache.hadoop.hdfs.server.blockmanagement;
1919

2020
import java.util.ArrayList;
21+
import java.util.Collections;
22+
import java.util.HashSet;
23+
import java.util.IdentityHashMap;
2124
import java.util.List;
25+
import java.util.Map;
26+
import java.util.Set;
2227

2328
import org.apache.hadoop.conf.Configuration;
29+
import org.apache.hadoop.fs.StorageType;
2430
import org.apache.hadoop.hdfs.DFSConfigKeys;
2531
import org.apache.hadoop.hdfs.DFSUtilClient;
2632
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -189,6 +195,11 @@ public synchronized int getExpiredHeartbeats() {
189195
return stats.expiredHeartbeats;
190196
}
191197

198+
@Override
public Map<StorageType, StorageTypeStats> getStorageTypeStats() {
  // NOTE(review): unlike the other accessors here (e.g. the synchronized
  // getExpiredHeartbeats()) this is unsynchronized, and statsMap.get()
  // returns an unmodifiable view of live state, so callers may observe
  // in-flight updates — confirm this is intended.
  return stats.statsMap.get();
}
202+
192203
synchronized void register(final DatanodeDescriptor d) {
193204
if (!d.isAlive) {
194205
addDatanode(d);
@@ -393,6 +404,9 @@ public void run() {
393404
* For decommissioning/decommissioned nodes, only used capacity is counted.
394405
*/
395406
private static class Stats {
407+
408+
private final StorageTypeStatsMap statsMap = new StorageTypeStatsMap();
409+
396410
private long capacityTotal = 0L;
397411
private long capacityUsed = 0L;
398412
private long capacityRemaining = 0L;
@@ -420,6 +434,14 @@ private void add(final DatanodeDescriptor node) {
420434
}
421435
cacheCapacity += node.getCacheCapacity();
422436
cacheUsed += node.getCacheUsed();
437+
Set<StorageType> storageTypes = new HashSet<>();
438+
for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
439+
statsMap.addStorage(storageInfo, node);
440+
storageTypes.add(storageInfo.getStorageType());
441+
}
442+
for (StorageType storageType : storageTypes) {
443+
statsMap.addNode(storageType, node);
444+
}
423445
}
424446

425447
private void subtract(final DatanodeDescriptor node) {
@@ -436,12 +458,84 @@ private void subtract(final DatanodeDescriptor node) {
436458
}
437459
cacheCapacity -= node.getCacheCapacity();
438460
cacheUsed -= node.getCacheUsed();
461+
Set<StorageType> storageTypes = new HashSet<>();
462+
for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
463+
statsMap.subtractStorage(storageInfo, node);
464+
storageTypes.add(storageInfo.getStorageType());
465+
}
466+
for (StorageType storageType : storageTypes) {
467+
statsMap.subtractNode(storageType, node);
468+
}
439469
}
440470

441471
/** Increment expired heartbeat counter. */
442472
private void incrExpiredHeartbeats() {
443473
expiredHeartbeats++;
444474
}
445475
}
446-
}
447476

477+
/** StorageType specific statistics.
478+
* For decommissioning/decommissioned nodes, only used capacity is counted.
479+
*/
480+
481+
static final class StorageTypeStatsMap {
482+
483+
private Map<StorageType, StorageTypeStats> storageTypeStatsMap =
484+
new IdentityHashMap<>();
485+
486+
private StorageTypeStatsMap() {}
487+
488+
private StorageTypeStatsMap(StorageTypeStatsMap other) {
489+
storageTypeStatsMap =
490+
new IdentityHashMap<>(other.storageTypeStatsMap);
491+
for (Map.Entry<StorageType, StorageTypeStats> entry :
492+
storageTypeStatsMap.entrySet()) {
493+
entry.setValue(new StorageTypeStats(entry.getValue()));
494+
}
495+
}
496+
497+
private Map<StorageType, StorageTypeStats> get() {
498+
return Collections.unmodifiableMap(storageTypeStatsMap);
499+
}
500+
501+
private void addNode(StorageType storageType,
502+
final DatanodeDescriptor node) {
503+
StorageTypeStats storageTypeStats =
504+
storageTypeStatsMap.get(storageType);
505+
if (storageTypeStats == null) {
506+
storageTypeStats = new StorageTypeStats();
507+
storageTypeStatsMap.put(storageType, storageTypeStats);
508+
}
509+
storageTypeStats.addNode(node);
510+
}
511+
512+
private void addStorage(final DatanodeStorageInfo info,
513+
final DatanodeDescriptor node) {
514+
StorageTypeStats storageTypeStats =
515+
storageTypeStatsMap.get(info.getStorageType());
516+
if (storageTypeStats == null) {
517+
storageTypeStats = new StorageTypeStats();
518+
storageTypeStatsMap.put(info.getStorageType(), storageTypeStats);
519+
}
520+
storageTypeStats.addStorage(info, node);
521+
}
522+
523+
private void subtractStorage(final DatanodeStorageInfo info,
524+
final DatanodeDescriptor node) {
525+
StorageTypeStats storageTypeStats =
526+
storageTypeStatsMap.get(info.getStorageType());
527+
if (storageTypeStats != null) {
528+
storageTypeStats.subtractStorage(info, node);
529+
}
530+
}
531+
532+
private void subtractNode(StorageType storageType,
533+
final DatanodeDescriptor node) {
534+
StorageTypeStats storageTypeStats =
535+
storageTypeStatsMap.get(storageType);
536+
if (storageTypeStats != null) {
537+
storageTypeStats.subtractNode(node);
538+
}
539+
}
540+
}
541+
}
Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
19+
package org.apache.hadoop.hdfs.server.blockmanagement;
20+
21+
import java.beans.ConstructorProperties;
22+
23+
import org.apache.hadoop.classification.InterfaceAudience;
24+
import org.apache.hadoop.classification.InterfaceStability;
25+
26+
/**
27+
* Statistics per StorageType.
28+
*
29+
*/
30+
@InterfaceAudience.Private
31+
@InterfaceStability.Evolving
32+
public class StorageTypeStats {
33+
private long capacityTotal = 0L;
34+
private long capacityUsed = 0L;
35+
private long capacityRemaining = 0L;
36+
private long blockPoolUsed = 0L;
37+
private int nodesInService = 0;
38+
39+
@ConstructorProperties({"capacityTotal",
40+
"capacityUsed", "capacityRemaining", "blockPoolUsed", "nodesInService"})
41+
public StorageTypeStats(long capacityTotal, long capacityUsed,
42+
long capacityRemaining, long blockPoolUsed, int nodesInService) {
43+
this.capacityTotal = capacityTotal;
44+
this.capacityUsed = capacityUsed;
45+
this.capacityRemaining = capacityRemaining;
46+
this.blockPoolUsed = blockPoolUsed;
47+
this.nodesInService = nodesInService;
48+
}
49+
50+
public long getCapacityTotal() {
51+
return capacityTotal;
52+
}
53+
54+
public long getCapacityUsed() {
55+
return capacityUsed;
56+
}
57+
58+
public long getCapacityRemaining() {
59+
return capacityRemaining;
60+
}
61+
62+
public long getBlockPoolUsed() {
63+
return blockPoolUsed;
64+
}
65+
66+
public int getNodesInService() {
67+
return nodesInService;
68+
}
69+
70+
StorageTypeStats() {}
71+
72+
StorageTypeStats(StorageTypeStats other) {
73+
capacityTotal = other.capacityTotal;
74+
capacityUsed = other.capacityUsed;
75+
capacityRemaining = other.capacityRemaining;
76+
blockPoolUsed = other.blockPoolUsed;
77+
nodesInService = other.nodesInService;
78+
}
79+
80+
void addStorage(final DatanodeStorageInfo info,
81+
final DatanodeDescriptor node) {
82+
capacityUsed += info.getDfsUsed();
83+
blockPoolUsed += info.getBlockPoolUsed();
84+
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
85+
capacityTotal += info.getCapacity();
86+
capacityRemaining += info.getRemaining();
87+
} else {
88+
capacityTotal += info.getDfsUsed();
89+
}
90+
}
91+
92+
void addNode(final DatanodeDescriptor node) {
93+
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
94+
nodesInService++;
95+
}
96+
}
97+
98+
void subtractStorage(final DatanodeStorageInfo info,
99+
final DatanodeDescriptor node) {
100+
capacityUsed -= info.getDfsUsed();
101+
blockPoolUsed -= info.getBlockPoolUsed();
102+
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
103+
capacityTotal -= info.getCapacity();
104+
capacityRemaining -= info.getRemaining();
105+
} else {
106+
capacityTotal -= info.getDfsUsed();
107+
}
108+
}
109+
110+
void subtractNode(final DatanodeDescriptor node) {
111+
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
112+
nodesInService--;
113+
}
114+
}
115+
}

0 commit comments

Comments
 (0)