Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -301,16 +301,6 @@ public int getAttemptedItemsCount() {
}
}

/**
 * Exposes the collection of items whose block movements have been
 * attempted, so tests can verify scheduling behavior.
 *
 * @return the live backing list of attempted item infos (not a copy)
 */
@VisibleForTesting
public List<AttemptedItemInfo> getStorageMovementAttemptedItems() {
  return this.storageMovementAttemptedItems;
}

/**
 * Exposes the queue of blocks whose movement has finished, so tests can
 * inspect completion reporting.
 *
 * @return the live queue of movement-finished blocks (not a copy)
 */
@VisibleForTesting
public BlockingQueue<Block> getMovementFinishedBlocks() {
  return this.movementFinishedBlocks;
}

public void clearQueues() {
movementFinishedBlocks.clear();
synchronized (storageMovementAttemptedItems) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1077,7 +1077,7 @@ public void clearQueues() {
* attempted or reported time stamp. This is used by
* {@link BlockStorageMovementAttemptedItems#storageMovementAttemptedItems}.
*/
public final static class AttemptedItemInfo extends ItemInfo {
final static class AttemptedItemInfo extends ItemInfo {
private long lastAttemptedOrReportedTime;
private final Set<Block> blocks;

Expand All @@ -1095,7 +1095,7 @@ public final static class AttemptedItemInfo extends ItemInfo {
* @param retryCount
* file retry count
*/
public AttemptedItemInfo(long rootId, long trackId,
AttemptedItemInfo(long rootId, long trackId,
long lastAttemptedOrReportedTime,
Set<Block> blocks, int retryCount) {
super(rootId, trackId, retryCount);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.Block;
Expand All @@ -40,12 +39,10 @@
import org.apache.hadoop.hdfs.server.namenode.sps.Context;
import org.apache.hadoop.hdfs.server.namenode.sps.FileCollector;
import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.DatanodeMap;
import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.DatanodeWithStorage;
import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.sps.metrics.ExternalSPSBeanMetrics;
import org.apache.hadoop.net.NetworkTopology;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand All @@ -65,7 +62,6 @@ public class ExternalSPSContext implements Context {
private final FileCollector fileCollector;
private final BlockMoveTaskHandler externalHandler;
private final BlockMovementListener blkMovementListener;
private ExternalSPSBeanMetrics spsBeanMetrics;

public ExternalSPSContext(SPSService service, NameNodeConnector nnc) {
this.service = service;
Expand Down Expand Up @@ -212,17 +208,4 @@ public void notifyMovementTriedBlocks(Block[] moveAttemptFinishedBlks) {
LOG.info("Movement attempted blocks", actualBlockMovements);
}
}

/**
 * Creates and registers the JMX bean that publishes metrics for the
 * given satisfier instance.
 *
 * @param sps the storage policy satisfier whose state the bean reports
 */
public void initMetrics(StoragePolicySatisfier sps) {
  this.spsBeanMetrics = new ExternalSPSBeanMetrics(sps);
}

/**
 * Unregisters the SPS metrics MBean, if one was registered.
 *
 * Guarding against a null bean makes this safe to call even when
 * {@code initMetrics} was never invoked (e.g. startup failed before
 * metrics were initialized), so callers no longer need an external
 * null check before invoking it.
 */
public void closeMetrics() {
  if (spsBeanMetrics != null) {
    spsBeanMetrics.close();
  }
}

/**
 * Returns the registered SPS metrics bean for test verification.
 *
 * @return the metrics bean, or null if {@code initMetrics} was not called
 */
@VisibleForTesting
public ExternalSPSBeanMetrics getSpsBeanMetrics() {
  return this.spsBeanMetrics;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,8 @@
*/
@InterfaceAudience.Private
public final class ExternalStoragePolicySatisfier {
public static final Logger LOG = LoggerFactory.getLogger(ExternalStoragePolicySatisfier.class);
public static final Logger LOG = LoggerFactory
.getLogger(ExternalStoragePolicySatisfier.class);

private ExternalStoragePolicySatisfier() {
// This is just a class to start and run external sps.
Expand All @@ -59,7 +60,6 @@ private ExternalStoragePolicySatisfier() {
*/
public static void main(String[] args) throws Exception {
NameNodeConnector nnc = null;
ExternalSPSContext context = null;
try {
StringUtils.startupShutdownMessage(StoragePolicySatisfier.class, args,
LOG);
Expand All @@ -69,10 +69,9 @@ public static void main(String[] args) throws Exception {
StoragePolicySatisfier sps = new StoragePolicySatisfier(spsConf);
nnc = getNameNodeConnector(spsConf);

context = new ExternalSPSContext(sps, nnc);
ExternalSPSContext context = new ExternalSPSContext(sps, nnc);
sps.init(context);
sps.start(StoragePolicySatisfierMode.EXTERNAL);
context.initMetrics(sps);
if (sps != null) {
sps.join();
}
Expand All @@ -83,11 +82,6 @@ public static void main(String[] args) throws Exception {
if (nnc != null) {
nnc.close();
}
if (context!= null) {
if (context.getSpsBeanMetrics() != null) {
context.closeMetrics();
}
}
}
}

Expand Down

This file was deleted.

This file was deleted.

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,6 @@
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
Expand Down Expand Up @@ -85,7 +84,6 @@
import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
import org.apache.hadoop.hdfs.server.namenode.sps.BlockStorageMovementAttemptedItems;
import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
import org.apache.hadoop.hdfs.server.sps.metrics.ExternalSPSBeanMetrics;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.SecurityUtil;
Expand All @@ -104,8 +102,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.util.function.Supplier;

/**
Expand Down Expand Up @@ -1821,34 +1817,4 @@ public void clear() {
actualBlockMovements.clear();
}
}

// Verifies that the ExternalSPS JMX bean registers under
// "Hadoop:service=ExternalSPS,name=ExternalSPS" and that each update*()
// call increments the corresponding attribute exactly once.
@Test(timeout = 300000)
public void testExternalSPSMetrics() throws Exception {
try {
createCluster();
// Start JMX but stop SPS thread to prevent mock data from being consumed.
externalSps.stop(true);
externalCtxt.initMetrics(externalSps);

// Read the attributes back through the platform MBean server rather than
// the bean object, to exercise the actual JMX registration path.
ExternalSPSBeanMetrics spsBeanMetrics = externalCtxt.getSpsBeanMetrics();
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxBeanName = new ObjectName("Hadoop:service=ExternalSPS,name=ExternalSPS");
// Assert metrics before update.
assertEquals(0, mbs.getAttribute(mxBeanName, "AttemptedItemsCount"));
assertEquals(0, mbs.getAttribute(mxBeanName, "ProcessingQueueSize"));
assertEquals(0, mbs.getAttribute(mxBeanName, "MovementFinishedBlocksCount"));

// Update metrics.
// NOTE(review): presumably each update* pushes one mock entry into the
// underlying SPS queues — confirm against ExternalSPSBeanMetrics.
spsBeanMetrics.updateAttemptedItemsCount();
spsBeanMetrics.updateProcessingQueueSize();
spsBeanMetrics.updateMovementFinishedBlocksCount();

// Assert metrics after update.
assertEquals(1, mbs.getAttribute(mxBeanName, "AttemptedItemsCount"));
assertEquals(1, mbs.getAttribute(mxBeanName, "ProcessingQueueSize"));
assertEquals(1, mbs.getAttribute(mxBeanName, "MovementFinishedBlocksCount"));
} finally {
// Always tear the mini-cluster down, even if an assertion failed.
shutdownCluster();
}
}
}