diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtil.java
index f78e9de9a38f..1c8f8b945335 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtil.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtil.java
@@ -49,26 +49,19 @@ public class HBaseCommonTestingUtil {
    * Compression algorithms to use in parameterized JUnit 4 tests
    */
   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
-    Arrays.asList(new Object[][] {
-      { Compression.Algorithm.NONE },
-      { Compression.Algorithm.GZ }
-    });
+    Arrays.asList(new Object[][] { { Compression.Algorithm.NONE }, { Compression.Algorithm.GZ } });
 
   /**
    * This is for unit tests parameterized with a two booleans.
    */
   public static final List<Object[]> BOOLEAN_PARAMETERIZED =
-    Arrays.asList(new Object[][] {
-      {false},
-      {true}
-    });
+    Arrays.asList(new Object[][] { { false }, { true } });
 
   /**
    * Compression algorithms to use in testing
    */
-  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
-    Compression.Algorithm.NONE, Compression.Algorithm.GZ
-  };
+  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS =
+    { Compression.Algorithm.NONE, Compression.Algorithm.GZ };
 
   protected final Configuration conf;
 
@@ -82,7 +75,6 @@ public HBaseCommonTestingUtil(Configuration conf) {
 
   /**
    * Returns this classes's instance of {@link Configuration}.
-   *
    * @return Instance of Configuration.
    */
   public Configuration getConfiguration() {
@@ -92,8 +84,7 @@ public Configuration getConfiguration() {
   /**
    * System property key to get base test directory value
    */
-  public static final String BASE_TEST_DIRECTORY_KEY =
-    "test.build.data.basedirectory";
+  public static final String BASE_TEST_DIRECTORY_KEY = "test.build.data.basedirectory";
 
   /**
    * Default base directory for test output.
@@ -127,13 +118,11 @@ public Path getDataTestDir(final String name) {
 
   /**
    * Sets up a directory for a test to use.
-   *
    * @return New directory path, if created.
    */
   protected Path setupDataTestDir() {
     if (this.dataTestDir != null) {
-      LOG.warn("Data test dir already setup in " +
-        dataTestDir.getAbsolutePath());
+      LOG.warn("Data test dir already setup in " + dataTestDir.getAbsolutePath());
       return null;
     }
     Path testPath = getRandomDir();
@@ -151,7 +140,7 @@ protected Path setupDataTestDir() {
   }
 
   /**
-   * Returns A dir with a random (uuid) name under the test dir
+   * Returns a dir with a random (uuid) name under the test dir
    * @see #getBaseTestDir()
    */
   public Path getRandomDir() {
@@ -159,8 +148,7 @@ public Path getRandomDir() {
   }
 
   public static UUID getRandomUUID() {
-    return new UUID(ThreadLocalRandom.current().nextLong(),
-      ThreadLocalRandom.current().nextLong());
+    return new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
   }
 
   protected void createSubDir(String propertyName, Path parent, String subDirName) {
@@ -212,8 +200,7 @@ public boolean cleanupTestDir(final String subdir) {
    * @see #setupDataTestDir()
    */
   private Path getBaseTestDir() {
-    String PathName = System.getProperty(
-      BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
+    String PathName = System.getProperty(BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
     return new Path(PathName);
   }
 
@@ -248,8 +235,7 @@ boolean deleteDir(final File dir) {
 
   /**
    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
    */
-  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
-      throws E {
+  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate) throws E {
     return Waiter.waitFor(this.conf, timeout, predicate);
   }
 
@@ -257,15 +243,15 @@ public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
   /**
    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
    */
   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
-      throws E {
+    throws E {
     return Waiter.waitFor(this.conf, timeout, interval, predicate);
   }
 
   /**
    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
    */
-  public <E extends Exception> long waitFor(long timeout, long interval,
-    boolean failIfTimeout, Predicate<E> predicate) throws E {
+  public <E extends Exception> long waitFor(long timeout, long interval, boolean failIfTimeout,
+    Predicate<E> predicate) throws E {
     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
   }
 
@@ -331,12 +317,11 @@ public int randomFreePort() {
   }
 
   /**
-   * Returns a random port. These ports cannot be registered with IANA and are
-   * intended for dynamic allocation (see http://bit.ly/dynports).
+   * Returns a random port. These ports cannot be registered with IANA and are intended for
+   * dynamic allocation (see http://bit.ly/dynports).
    */
   private int randomPort() {
-    return MIN_RANDOM_PORT
-      + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
+    return MIN_RANDOM_PORT + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
   }
 
   interface AvailablePortChecker {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 547753ca4980..4fcb51ff30b6 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -211,7 +211,7 @@ private void installSlowingCoproc() throws IOException, InterruptedException {
     TableDescriptor desc = admin.getDescriptor(t);
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(desc);
     builder.setCoprocessor(SlowMeCoproScanOperations.class.getName());
-    HBaseTestingUtil.modifyTableSync(admin, builder.build());
+    admin.modifyTable(builder.build());
   }
 
   @Test
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
index 45eb55fa4c21..28d44edb76b4 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -73,7 +74,7 @@ public abstract class MultiTableInputFormatTestBase {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // switch TIF to log at DEBUG level
-    TEST_UTIL.enableDebug(MultiTableInputFormatBase.class);
+    Log4jUtils.enableDebug(MultiTableInputFormatBase.class);
     // start mini hbase cluster
     TEST_UTIL.startMiniCluster(3);
     // create and fill table
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
index b4a53ea65e82..eca7ca6f32d6 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
 import org.apache.hadoop.mapreduce.Job;
@@ -43,8 +44,8 @@ public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase {
 
   @BeforeClass
   public static void setupLogging() {
-    TEST_UTIL.enableDebug(MultiTableInputFormat.class);
-  }
+    Log4jUtils.enableDebug(MultiTableInputFormat.class);
+  }
 
   @Override
   protected void initJob(List<Scan> scans, Job job) throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
index 89e7b49eb695..c9ba9badfa2a 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
@@ -53,8 +54,8 @@ public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTest
 
   @BeforeClass
   public static void setUpSnapshots() throws Exception {
-    TEST_UTIL.enableDebug(MultiTableSnapshotInputFormat.class);
-    TEST_UTIL.enableDebug(MultiTableSnapshotInputFormatImpl.class);
+    Log4jUtils.enableDebug(MultiTableSnapshotInputFormat.class);
+    Log4jUtils.enableDebug(MultiTableSnapshotInputFormatImpl.class);
 
     // take a snapshot of every table we have.
     for (String tableName : TABLES) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java
index d43e62b5823c..0584be85e72b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java
@@ -28,28 +28,27 @@ import org.apache.yetus.audience.InterfaceStability;
 
 /**
- * This class defines methods that can help with managing HBase clusters
- * from unit tests and system tests. There are 3 types of cluster deployments:
+ * This class defines methods that can help with managing HBase clusters from unit tests and system
+ * tests. There are 3 types of cluster deployments:
  * <ul>
  * <li><b>SingleProcessHBaseCluster:</b> each server is run in the same JVM in separate threads,
  * used by unit tests</li>
  * <li><b>DistributedHBaseCluster:</b> the cluster is pre-deployed, system and integration tests can
- * interact with the cluster.</li>
- * <li><b>ProcessBasedLocalHBaseCluster:</b> each server is deployed locally but in separate
- * JVMs.</li>
+ * interact with the cluster.
+ * </li>
+ * <li><b>ProcessBasedLocalHBaseCluster:</b> each server is deployed locally but in separate JVMs.
+ * </li>
  * </ul>
  * <p>
- * HBaseCluster unifies the way tests interact with the cluster, so that the same test can
- * be run against a mini-cluster during unit test execution, or a distributed cluster having
- * tens/hundreds of nodes during execution of integration tests.
- *
+ * HBaseCluster unifies the way tests interact with the cluster, so that the same test can be run
+ * against a mini-cluster during unit test execution, or a distributed cluster having tens/hundreds
+ * of nodes during execution of integration tests.
  * <p>
  * HBaseCluster exposes client-side public interfaces to tests, so that tests does not assume
- * running in a particular mode. Not all the tests are suitable to be run on an actual cluster,
- * and some tests will still need to mock stuff and introspect internal state. For those use
- * cases from unit tests, or if more control is needed, you can use the subclasses directly.
- * In that sense, this class does not abstract away every interface that
- * SingleProcessHBaseCluster or DistributedHBaseCluster provide.
+ * running in a particular mode. Not all the tests are suitable to be run on an actual cluster, and
+ * some tests will still need to mock stuff and introspect internal state. For those use cases from
+ * unit tests, or if more control is needed, you can use the subclasses directly. In that sense,
+ * this class does not abstract away every interface that SingleProcessHBaseCluster
+ * or DistributedHBaseCluster provide.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX)
 @InterfaceStability.Evolving
@@ -85,24 +84,23 @@ public Configuration getConf() {
   public abstract ClusterMetrics getClusterMetrics() throws IOException;
 
   /**
-   * Returns a ClusterStatus for this HBase cluster as observed at the
-   * starting of the HBaseCluster
+   * Returns a ClusterStatus for this HBase cluster as observed at the starting of the HBaseCluster
    */
   public ClusterMetrics getInitialClusterMetrics() throws IOException {
     return initialClusterStatus;
   }
 
   /**
-   * Starts a new region server on the given hostname or if this is a mini/local cluster,
-   * starts a region server locally.
+   * Starts a new region server on the given hostname or if this is a mini/local cluster, starts a
+   * region server locally.
    * @param hostname the hostname to start the regionserver on
    * @throws IOException if something goes wrong
    */
   public abstract void startRegionServer(String hostname, int port) throws IOException;
 
   /**
-   * Kills the region server process if this is a distributed cluster, otherwise
-   * this causes the region server to exit doing basic clean up only.
+   * Kills the region server process if this is a distributed cluster, otherwise this causes the
+   * region server to exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killRegionServer(ServerName serverName) throws IOException;
 
@@ -110,9 +108,9 @@ public ClusterMetrics getInitialClusterMetrics() throws IOException {
   /**
    * Keeping track of killed servers and being able to check if a particular server was killed makes
    * it possible to do fault tolerance testing for dead servers in a deterministic way. A concrete
-   * example of such case is - killing servers and waiting for all regions of a particular table
-   * to be assigned. We can check for server column in META table and that its value is not one
-   * of the killed servers.
+   * example of such case is - killing servers and waiting for all regions of a particular table to
+   * be assigned. We can check for server column in META table and that its value is not one of the
+   * killed servers.
    */
   public abstract boolean isKilledRS(ServerName serverName);
 
@@ -127,7 +125,7 @@ public ClusterMetrics getInitialClusterMetrics() throws IOException {
    * @throws IOException if something goes wrong or timeout occurs
    */
   public void waitForRegionServerToStart(String hostname, int port, long timeout)
-      throws IOException {
+    throws IOException {
     long start = EnvironmentEdgeManager.currentTime();
     while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
       for (ServerName server : getClusterMetrics().getLiveServerMetrics().keySet()) {
@@ -137,8 +135,8 @@ public void waitForRegionServerToStart(String hostname, int port, long timeout)
       }
       Threads.sleep(100);
     }
-    throw new IOException("did timeout " + timeout + "ms waiting for region server to start: "
-      + hostname);
+    throw new IOException(
+      "did timeout " + timeout + "ms waiting for region server to start: " + hostname);
   }
 
   /**
@@ -146,7 +144,7 @@ public void waitForRegionServerToStart(String hostname, int port, long timeout)
    * @throws IOException if something goes wrong or timeout occurs
    */
   public abstract void waitForRegionServerToStop(ServerName serverName, long timeout)
-      throws IOException;
+    throws IOException;
 
   /**
    * Suspend the region server
@@ -163,23 +161,23 @@ public abstract void waitForRegionServerToStop(ServerName serverName, long timeo
   public abstract void resumeRegionServer(ServerName serverName) throws IOException;
 
   /**
-   * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster,
-   * silently logs warning message.
+   * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster, silently
+   * logs warning message.
    * @param hostname the hostname to start the regionserver on
    * @throws IOException if something goes wrong
    */
   public abstract void startZkNode(String hostname, int port) throws IOException;
 
   /**
-   * Kills the zookeeper node process if this is a distributed cluster, otherwise,
-   * this causes master to exit doing basic clean up only.
+   * Kills the zookeeper node process if this is a distributed cluster, otherwise, this causes
+   * master to exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killZkNode(ServerName serverName) throws IOException;
 
   /**
-   * Stops the region zookeeper if this is a distributed cluster, otherwise
-   * silently logs warning message.
+   * Stops the region zookeeper if this is a distributed cluster, otherwise silently logs warning
+   * message.
    * @throws IOException if something goes wrong
    */
   public abstract void stopZkNode(ServerName serverName) throws IOException;
 
@@ -188,33 +186,30 @@ public abstract void waitForRegionServerToStop(ServerName serverName, long timeo
    * Wait for the specified zookeeper node to join the cluster
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public abstract void waitForZkNodeToStart(ServerName serverName, long timeout)
-      throws IOException;
+  public abstract void waitForZkNodeToStart(ServerName serverName, long timeout) throws IOException;
 
   /**
    * Wait for the specified zookeeper node to stop the thread / process.
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public abstract void waitForZkNodeToStop(ServerName serverName, long timeout)
-      throws IOException;
+  public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) throws IOException;
 
   /**
-   * Starts a new datanode on the given hostname or if this is a mini/local cluster,
-   * silently logs warning message.
+   * Starts a new datanode on the given hostname or if this is a mini/local cluster, silently logs
+   * warning message.
    * @throws IOException if something goes wrong
    */
   public abstract void startDataNode(ServerName serverName) throws IOException;
 
   /**
-   * Kills the datanode process if this is a distributed cluster, otherwise,
-   * this causes master to exit doing basic clean up only.
+   * Kills the datanode process if this is a distributed cluster, otherwise, this causes master to
+   * exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killDataNode(ServerName serverName) throws IOException;
 
   /**
-   * Stops the datanode if this is a distributed cluster, otherwise
-   * silently logs warning message.
+   * Stops the datanode if this is a distributed cluster, otherwise silently logs warning message.
    * @throws IOException if something goes wrong
    */
   public abstract void stopDataNode(ServerName serverName) throws IOException;
 
@@ -258,26 +253,26 @@ public abstract void waitForDataNodeToStop(ServerName serverName, long timeout)
    * @throws IOException if something goes wrong or timeout occurs
    */
   public abstract void waitForNameNodeToStart(ServerName serverName, long timeout)
-      throws IOException;
+    throws IOException;
 
   /**
    * Wait for the specified namenode to stop
    * @throws IOException if something goes wrong or timeout occurs
    */
   public abstract void waitForNameNodeToStop(ServerName serverName, long timeout)
-      throws IOException;
+    throws IOException;
 
   /**
-   * Starts a new master on the given hostname or if this is a mini/local cluster,
-   * starts a master locally.
+   * Starts a new master on the given hostname or if this is a mini/local cluster, starts a master
+   * locally.
    * @param hostname the hostname to start the master on
    * @throws IOException if something goes wrong
    */
   public abstract void startMaster(String hostname, int port) throws IOException;
 
   /**
-   * Kills the master process if this is a distributed cluster, otherwise,
-   * this causes master to exit doing basic clean up only.
+   * Kills the master process if this is a distributed cluster, otherwise, this causes master to
+   * exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killMaster(ServerName serverName) throws IOException;
 
@@ -292,31 +287,23 @@ public abstract void waitForNameNodeToStop(ServerName serverName, long timeout)
    * Wait for the specified master to stop the thread / process.
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public abstract void waitForMasterToStop(ServerName serverName, long timeout)
-      throws IOException;
+  public abstract void waitForMasterToStop(ServerName serverName, long timeout) throws IOException;
 
   /**
-   * Blocks until there is an active master and that master has completed
-   * initialization.
-   *
-   * @return true if an active master becomes available. false if there are no
-   *         masters left.
+   * Blocks until there is an active master and that master has completed initialization.
+   * @return true if an active master becomes available. false if there are no masters left.
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public boolean waitForActiveAndReadyMaster()
-      throws IOException {
+  public boolean waitForActiveAndReadyMaster() throws IOException {
     return waitForActiveAndReadyMaster(Long.MAX_VALUE);
   }
 
   /**
-   * Blocks until there is an active master and that master has completed
-   * initialization.
+   * Blocks until there is an active master and that master has completed initialization.
    * @param timeout the timeout limit in ms
-   * @return true if an active master becomes available. false if there are no
-   *         masters left.
+   * @return true if an active master becomes available. false if there are no masters left.
    */
-  public abstract boolean waitForActiveAndReadyMaster(long timeout)
-      throws IOException;
+  public abstract boolean waitForActiveAndReadyMaster(long timeout) throws IOException;
 
   /**
    * Wait for HBase Cluster to shut down.
@@ -329,10 +316,9 @@ public abstract boolean waitForActiveAndReadyMaster(long timeout)
   public abstract void shutdown() throws IOException;
 
   /**
-   * Restores the cluster to it's initial state if this is a real cluster,
-   * otherwise does nothing.
-   * This is a best effort restore. If the servers are not reachable, or insufficient
-   * permissions, etc. restoration might be partial.
+   * Restores the cluster to it's initial state if this is a real cluster, otherwise does nothing.
+   * This is a best effort restore. If the servers are not reachable, or insufficient permissions,
+   * etc. restoration might be partial.
    * @return whether restoration is complete
    */
   public boolean restoreInitialStatus() throws IOException {
@@ -340,10 +326,9 @@ public boolean restoreInitialStatus() throws IOException {
   }
 
   /**
-   * Restores the cluster to given state if this is a real cluster,
-   * otherwise does nothing.
-   * This is a best effort restore. If the servers are not reachable, or insufficient
-   * permissions, etc. restoration might be partial.
+   * Restores the cluster to given state if this is a real cluster, otherwise does nothing. This is
+   * a best effort restore. If the servers are not reachable, or insufficient permissions, etc.
+   * restoration might be partial.
    * @return whether restoration is complete
   */
   public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOException {
@@ -365,19 +350,19 @@ public ServerName getServerHoldingMeta() throws IOException {
    * @return ServerName that hosts the region or null
    */
   public abstract ServerName getServerHoldingRegion(final TableName tn, byte[] regionName)
-      throws IOException;
+    throws IOException;
 
   /**
-   * @return whether we are interacting with a distributed cluster as opposed to an
-   *         in-process mini/local cluster.
+   * @return whether we are interacting with a distributed cluster as opposed to an in-process
+   *         mini/local cluster.
    */
   public boolean isDistributedCluster() {
     return false;
   }
 
   /**
-   * Closes all the resources held open for this cluster. Note that this call does not shutdown
-   * the cluster.
+   * Closes all the resources held open for this cluster. Note that this call does not shutdown the
+   * cluster.
    * @see #shutdown()
   */
   @Override
@@ -385,8 +370,6 @@ public boolean isDistributedCluster() {
 
   /**
    * Wait for the namenode.
-   *
-   * @throws InterruptedException
    */
   public void waitForNamenodeAvailable() throws InterruptedException {
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
index 1979732861dc..37a255ba869b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
@@ -156,20 +156,23 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
- * Facility for testing HBase. Replacement for
- * old HBaseTestCase and HBaseClusterTestCase functionality.
- * Create an instance and keep it around testing HBase. This class is
- * meant to be your one-stop shop for anything you might need testing. Manages
- * one cluster at a time only. Managed cluster can be an in-process
- * {@link SingleProcessHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}.
- * Not all methods work with the real cluster.
- * Depends on log4j being on classpath and
- * hbase-site.xml for logging and test-run configuration. It does not set
- * logging levels.
- * In the configuration properties, default values for master-info-port and
- * region-server-port are overridden such that a random port will be assigned (thus
- * avoiding port contention if another local HBase instance is already running).
- * <p>To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
+ * Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase
+ * functionality. Create an instance and keep it around testing HBase.
+ * <p>
+ * This class is meant to be your one-stop shop for anything you might need testing. Manages one
+ * cluster at a time only. Managed cluster can be an in-process {@link SingleProcessHBaseCluster},
+ * or a deployed cluster of type {@code DistributedHBaseCluster}. Not all methods work with the real
+ * cluster.
+ * <p>
+ * Depends on log4j being on classpath and hbase-site.xml for logging and test-run configuration.
+ * <p>
+ * It does not set logging levels.
+ * <p>
+ * In the configuration properties, default values for master-info-port and region-server-port are
+ * overridden such that a random port will be assigned (thus avoiding port contention if another
+ * local HBase instance is already running).
+ * <p>
+ * To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
  * setting it to true.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX)
 @InterfaceStability.Evolving
@@ -188,12 +191,10 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
   public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
 
   /**
-   * The default number of regions per regionserver when creating a pre-split
-   * table.
+   * The default number of regions per regionserver when creating a pre-split table.
   */
   public static final int DEFAULT_REGIONS_PER_SERVER = 3;
 
-
   public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
   public static final boolean PRESPLIT_TEST_TABLE = true;
 
@@ -207,8 +208,9 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
 
   private String hadoopLogDir;
 
-  /** Directory on test filesystem where we put the data for this instance of
-    * HBaseTestingUtility*/
+  /**
+   * Directory on test filesystem where we put the data for this instance of HBaseTestingUtility
+   */
   private Path dataTestDirOnTestFS = null;
 
   private final AtomicReference<AsyncClusterConnection> asyncConnection = new AtomicReference<>();
@@ -221,7 +223,6 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
 
   /**
    * Checks to see if a specific port is available.
-   *
    * @param port the port number to check for availability
    * @return true if the port is available, or false if not
   */
@@ -254,13 +255,11 @@ public static boolean available(int port) {
   }
 
   /**
-   * Create all combinations of Bloom filters and compression algorithms for
-   * testing.
+   * Create all combinations of Bloom filters and compression algorithms for testing.
   */
   private static List<Object[]> bloomAndCompressionCombinations() {
     List<Object[]> configurations = new ArrayList<>();
-    for (Compression.Algorithm comprAlgo :
-      HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) {
+    for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) {
       for (BloomType bloomType : BloomType.values()) {
         configurations.add(new Object[] { comprAlgo, bloomType });
       }
@@ -294,27 +293,27 @@ public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
   }
 
   public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
-      bloomAndCompressionCombinations();
-
+    bloomAndCompressionCombinations();
 
   /**
-   * <p>Create an HBaseTestingUtility using a default configuration.
-   *
-   * <p>Initially, all tmp files are written to a local test data directory.
-   * Once {@link #startMiniDFSCluster} is called, either directly or via
-   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
+   * <p>
+   * Create an HBaseTestingUtility using a default configuration.
+   * <p>
+   * Initially, all tmp files are written to a local test data directory. Once
+   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
+   * data will be written to the DFS directory instead.
   */
   public HBaseTestingUtil() {
     this(HBaseConfiguration.create());
   }
 
   /**
-   * <p>Create an HBaseTestingUtility using a given configuration.
-   *
-   * <p>Initially, all tmp files are written to a local test data directory.
-   * Once {@link #startMiniDFSCluster} is called, either directly or via
-   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
-   *
+   * <p>
+   * Create an HBaseTestingUtility using a given configuration.
+   * <p>
+   * Initially, all tmp files are written to a local test data directory. Once
+   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
+   * data will be written to the DFS directory instead.
    * @param conf The configuration to use for further operations
   */
   public HBaseTestingUtil(@Nullable Configuration conf) {
@@ -333,21 +332,21 @@ public HBaseTestingUtil(@Nullable Configuration conf) {
     // Every cluster is a local cluster until we start DFS
     // Note that conf could be null, but this.conf will not be
     String dataTestDir = getDataTestDir().toString();
-    this.conf.set("fs.defaultFS","file:///");
+    this.conf.set("fs.defaultFS", "file:///");
     this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
     LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
-    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,false);
+    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
     // If the value for random ports isn't set set it to true, thus making
     // tests opt-out for random port assignment
     this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
-        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
+      this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
   }
 
   /**
    * Close both the region {@code r} and it's underlying WAL. For use in tests.
   */
   public static void closeRegionAndWAL(final Region r) throws IOException {
-    closeRegionAndWAL((HRegion)r);
+    closeRegionAndWAL((HRegion) r);
   }
 
   /**
@@ -361,13 +360,11 @@ public static void closeRegionAndWAL(final HRegion r) throws IOException {
   }
 
   /**
-   * Returns this classes's instance of {@link Configuration}. Be careful how
-   * you use the returned Configuration since {@link Connection} instances
-   * can be shared. The Map of Connections is keyed by the Configuration. If
-   * say, a Connection was being used against a cluster that had been shutdown,
-   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
-   * be wholesome. Rather than use the return direct, its usually best to
-   * make a copy and use that. Do
+   * Returns this classes's instance of {@link Configuration}. Be careful how you use the returned
+   * Configuration since {@link Connection} instances can be shared. The Map of Connections is keyed
+   * by the Configuration. If say, a Connection was being used against a cluster that had been
+   * shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be wholesome.
+   * Rather than use the return direct, its usually best to make a copy and use that. Do
    * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
    * @return Instance of Configuration.
   */
@@ -381,19 +378,14 @@ public void setHBaseCluster(HBaseClusterInterface hbaseCluster) {
   }
 
   /**
-   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
-   * Give it a random name so can have many concurrent tests running if
-   * we need to. It needs to amend the {@link #TEST_DIRECTORY_KEY}
-   * System property, as it's what minidfscluster bases
-   * it data dir on. Moding a System property is not the way to do concurrent
-   * instances -- another instance could grab the temporary
-   * value unintentionally -- but not anything can do about it at moment;
-   * single instance only is how the minidfscluster works.
-   *
-   * We also create the underlying directory names for
-   * hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
-   * in the conf, and as a system property for hadoop.tmp.dir (We do not create them!).
-   *
+   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so can
+   * have many concurrent tests running if we need to. It needs to amend the
+   * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases it data dir on.
+   * Moding a System property is not the way to do concurrent instances -- another instance could
+   * grab the temporary value unintentionally -- but not anything can do about it at moment; single
+   * instance only is how the minidfscluster works. We also create the underlying directory names
+   * for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the
+   * conf, and as a system property for hadoop.tmp.dir (We do not create them!).
   * @return The calculated data test build directory, if newly-created.
   */
   @Override
@@ -403,40 +395,31 @@ protected Path setupDataTestDir() {
       return null;
     }
 
-    createSubDirAndSystemProperty(
-      "hadoop.log.dir",
-      testPath, "hadoop-log-dir");
+    createSubDirAndSystemProperty("hadoop.log.dir", testPath, "hadoop-log-dir");
 
     // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
-    //  we want our own value to ensure uniqueness on the same machine
-    createSubDirAndSystemProperty(
-      "hadoop.tmp.dir",
-      testPath, "hadoop-tmp-dir");
+    // we want our own value to ensure uniqueness on the same machine
+    createSubDirAndSystemProperty("hadoop.tmp.dir", testPath, "hadoop-tmp-dir");
 
     // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
-    createSubDir(
-      "mapreduce.cluster.local.dir",
-      testPath, "mapred-local-dir");
+    createSubDir("mapreduce.cluster.local.dir", testPath, "mapred-local-dir");
     return testPath;
   }
 
-  private void createSubDirAndSystemProperty(
-    String propertyName, Path parent, String subDirName){
+  private void createSubDirAndSystemProperty(String propertyName, Path parent, String subDirName) {
     String sysValue = System.getProperty(propertyName);
 
     if (sysValue != null) {
       // There is already a value set. So we do nothing but hope
-      //  that there will be no conflicts
-      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
-        sysValue + " so I do NOT create it in " + parent);
+      // that there will be no conflicts
+      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue
+        + " so I do NOT create it in " + parent);
       String confValue = conf.get(propertyName);
-      if (confValue != null && !confValue.endsWith(sysValue)){
-        LOG.warn(
-          propertyName + " property value differs in configuration and system: "+
-          "Configuration="+confValue+" while System="+sysValue+
-          " Erasing configuration value by system value."
-        );
+      if (confValue != null && !confValue.endsWith(sysValue)) {
+        LOG.warn(propertyName + " property value differs in configuration and system: "
+          + "Configuration=" + confValue + " while System=" + sysValue
+          + " Erasing configuration value by system value.");
       }
       conf.set(propertyName, sysValue);
     } else {
@@ -447,8 +430,8 @@ private void createSubDirAndSystemProperty(
   }
 
   /**
-   * @return Where to write test data on the test filesystem; Returns working directory
-   * for the test filesystem by default
+   * @return Where to write test data on the test filesystem; Returns working directory for the test
+   *         filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
@@ -458,9 +441,9 @@ private Path getBaseTestDirOnTestFS() throws IOException {
   }
 
   /**
-   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
-   * to write temporary test data. <br>
-   * Call this method after setting up the mini dfs cluster
-   * if the test relies on it.
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
+   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
+   * on it.
   * @return a unique path in the test filesystem
   */
   public Path getDataTestDirOnTestFS() throws IOException {
@@ -472,9 +455,9 @@ public Path getDataTestDirOnTestFS() throws IOException {
   }
 
   /**
-   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
-   * to write temporary test data. Call this method after setting up the mini dfs cluster
-   * if the test relies on it.
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
+   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
+   * on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
@@ -483,13 +466,12 @@ public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
   }
 
   /**
-   * Sets up a path in test filesystem to be used by tests.
-   * Creates a new directory if not already setup.
+   * Sets up a path in test filesystem to be used by tests. Creates a new directory if not already
+   * setup.
   */
   private void setupDataTestDirOnTestFS() throws IOException {
     if (dataTestDirOnTestFS != null) {
-      LOG.warn("Data test on test fs dir already setup in "
-        + dataTestDirOnTestFS.toString());
+      LOG.warn("Data test on test fs dir already setup in " + dataTestDirOnTestFS.toString());
       return;
     }
     dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
@@ -499,10 +481,10 @@ private void setupDataTestDirOnTestFS() throws IOException {
   * Sets up a new path in test filesystem to be used by tests.
   */
   private Path getNewDataTestDirOnTestFS() throws IOException {
-    //The file system can be either local, mini dfs, or if the configuration
-    //is supplied externally, it can be an external cluster FS. If it is a local
-    //file system, the tests should use getBaseTestDir, otherwise, we can use
-    //the working directory, and create a unique sub dir there
+    // The file system can be either local, mini dfs, or if the configuration
+    // is supplied externally, it can be an external cluster FS. If it is a local
+    // file system, the tests should use getBaseTestDir, otherwise, we can use
+    // the working directory, and create a unique sub dir there
     FileSystem fs = getTestFileSystem();
     Path newDataTestDir;
     String randomStr = getRandomUUID().toString();
@@ -521,19 +503,18 @@ private Path getNewDataTestDirOnTestFS() throws IOException {
   /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
-   * @throws IOException
   */
   public boolean cleanupDataTestDirOnTestFS() throws IOException {
     boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
-    if (ret)
+    if (ret) {
       dataTestDirOnTestFS = null;
+    }
     return ret;
   }
 
   /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed child
-   * @throws IOException
   */
   public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
     Path cpath = getDataTestDirOnTestFS(subdirName);
@@ -543,7 +524,6 @@ public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException
   /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
-   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
@@ -552,19 +532,16 @@ public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
   }
 
   /**
-   * Start a minidfscluster.
-   * This is useful if you want to run datanode on distinct hosts for things
-   * like HDFS block location verification.
-   * If you start MiniDFSCluster without host names, all instances of the
-   * datanodes will have the same host name.
+   * Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things
+   * like HDFS block location verification. If you start MiniDFSCluster without host names, all
+   * instances of the datanodes will have the same host name.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
-  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
-      throws Exception {
-    if ( hosts != null && hosts.length != 0) {
+  public MiniDFSCluster startMiniDFSCluster(final String[] hosts) throws Exception {
+    if (hosts != null && hosts.length != 0) {
       return startMiniDFSCluster(hosts.length, hosts);
     } else {
       return startMiniDFSCluster(1, null);
@@ -572,21 +549,19 @@ public MiniDFSCluster startMiniDFSCluster(final String hosts[])
   }
 
   /**
-   * Start a minidfscluster.
-   * Can only create one.
+   * Start a minidfscluster. Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
-  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
-      throws Exception {
+  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts) throws Exception {
     return startMiniDFSCluster(servers, null, hosts);
   }
 
   private void setFs() throws IOException {
-    if(this.dfsCluster == null){
+    if (this.dfsCluster == null) {
       LOG.info("Skipping setting fs because dfsCluster is null");
       return;
     }
@@ -597,8 +572,8 @@ private void setFs() throws IOException {
     conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
   }
 
-  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
-      throws Exception {
+  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
+    throws Exception {
     createDirsAndSetProperties();
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
 
@@ -606,9 +581,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], St
     Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
     Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
       "ERROR");
-
     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
-        true, null, racks, hosts, null);
+      true, null, racks, hosts, null);
 
     // Set this just-started cluster as our filesystem.
     setFs();
@@ -616,7 +590,7 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], St
     // Wait for the cluster to be totally up
     this.dfsCluster.waitClusterUp();
 
-    //reset the test directory for test file system
+    // reset the test directory for test file system
     dataTestDirOnTestFS = null;
     String dataTestDir = getDataTestDir().toString();
     conf.set(HConstants.HBASE_DIR, dataTestDir);
@@ -632,7 +606,7 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
     Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
       "ERROR");
     dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
-        null, null, null);
+      null, null, null);
     return dfsCluster;
   }
 
@@ -640,6 +614,7 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
   * This is used before starting HDFS and map-reduce mini-clusters Run something like the below to
   * check for the likes of '/tmp' references -- i.e. references outside of the test data dir -- in
   * the conf.
+   *
   * <pre>
    * Configuration conf = TEST_UTIL.getConfiguration();
    * for (Iterator<Map.Entry<String, String>> i = conf.iterator(); i.hasNext();) {
@@ -688,35 +663,35 @@ private void createDirsAndSetProperties() throws IOException {
   }
 
   /**
-   *  Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
-   *  new column families. Default to false.
+   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
+   * Default to false.
    */
-  public boolean isNewVersionBehaviorEnabled(){
+  public boolean isNewVersionBehaviorEnabled() {
     final String propName = "hbase.tests.new.version.behavior";
     String v = System.getProperty(propName);
-    if (v != null){
+    if (v != null) {
       return Boolean.parseBoolean(v);
     }
     return false;
   }
 
   /**
-   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
-   *  This allows to specify this parameter on the command line.
-   *   If not set, default is true.
+   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property. This
+   * allows to specify this parameter on the command line. If not set, default is true.
    */
-  public boolean isReadShortCircuitOn(){
+  public boolean isReadShortCircuitOn() {
     final String propName = "hbase.tests.use.shortcircuit.reads";
     String readOnProp = System.getProperty(propName);
-    if (readOnProp != null){
-      return  Boolean.parseBoolean(readOnProp);
+    if (readOnProp != null) {
+      return Boolean.parseBoolean(readOnProp);
     } else {
       return conf.getBoolean(propName, false);
     }
   }
 
-  /** Enable the short circuit read, unless configured differently.
-   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
+  /**
+   * Enable the short circuit read, unless configured differently. Set both HBase and HDFS settings,
+   * including skipping the hdfs checksum checks.
    */
   private void enableShortCircuit() {
     if (isReadShortCircuitOn()) {
@@ -747,9 +722,7 @@ private String createDirAndSetProperty(final String relPath, String property) {
   }
 
   /**
-   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
-   * or does nothing.
-   * @throws IOException
+   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing.
    */
   public void shutdownMiniDFSCluster() throws IOException {
     if (this.dfsCluster != null) {
@@ -761,269 +734,6 @@ public void shutdownMiniDFSCluster() throws IOException {
     }
   }
 
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
-   * All other options will use default values, defined in
-   * {@link StartTestingClusterOption.Builder}.
-   * @param createWALDir Whether to create a new WAL directory.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .createWALDir(createWALDir).build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
-   * @param createRootDir Whether to create a new root or data directory path.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
-  throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
-   * @param createRootDir Whether to create a new root or data directory path.
-   * @param createWALDir Whether to create a new WAL directory.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
-      boolean createWALDir) throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
-        .createWALDir(createWALDir).build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
-   * @param createRootDir Whether to create a new root or data directory path.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
-    throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
-        .numDataNodes(numSlaves).build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
-   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
-   *          HDFS data node number.
-   * @param createRootDir Whether to create a new root or data directory path.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
-      boolean createRootDir) throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
-        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
-   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
-   *          HDFS data node number.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
-      throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).numRegionServers(numSlaves)
-        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numRegionServers Number of region servers.
-   * @param numDataNodes Number of datanodes.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
-      throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
-        .build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
-   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
-   *          HDFS data node number.
-   * @param masterClass The class to use as HMaster, or null for default.
-   * @param rsClass The class to use as HRegionServer, or null for default.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
-      Class<? extends HMaster> masterClass,
-      Class<? extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
-      throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).masterClass(masterClass)
-        .numRegionServers(numSlaves).rsClass(rsClass)
-        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
-        .build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numRegionServers Number of region servers.
-   * @param numDataNodes Number of datanodes.
-   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
-   *          HDFS data node number.
-   * @param masterClass The class to use as HMaster, or null for default.
-   * @param rsClass The class to use as HRegionServer, or null for default.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see HBASE-21071
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
-      String[] dataNodeHosts, Class masterClass,
-      Class rsClass)
-    throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).masterClass(masterClass)
-        .numRegionServers(numRegionServers).rsClass(rsClass)
-        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
-        .build();
-    return startMiniCluster(option);
-  }
-
-  /**
-   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
-   * defined in {@link StartTestingClusterOption.Builder}.
-   * @param numMasters Master node number.
-   * @param numRegionServers Number of region servers.
-   * @param numDataNodes Number of datanodes.
-   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
-   *          HDFS data node number.
-   * @param masterClass The class to use as HMaster, or null for default.
-   * @param rsClass The class to use as HRegionServer, or null for default.
-   * @param createRootDir Whether to create a new root or data directory path.
-   * @param createWALDir Whether to create a new WAL directory.
-   * @return The mini HBase cluster created.
-   * @see #shutdownMiniCluster()
-   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
-   *             {@link #startMiniCluster(StartTestingClusterOption)} instead.
-   * @see #startMiniCluster(StartTestingClusterOption)
-   * @see HBASE-21071
-   */
-  @Deprecated
-  public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
-      String[] dataNodeHosts, Class masterClass,
-      Class rsClass, boolean createRootDir,
-      boolean createWALDir) throws Exception {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).masterClass(masterClass)
-        .numRegionServers(numRegionServers).rsClass(rsClass)
-        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
-        .createRootDir(createRootDir).createWALDir(createWALDir)
-        .build();
-    return startMiniCluster(option);
-  }
-
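
For callers of the removed overloads, the replacement is the builder each deprecation note points
at. A minimal migration sketch, assuming the test holds an HBaseTestingUtil instance named
TEST_UTIL (the field name is an assumption):

    // Before (removed): startMiniCluster(2, 3, null) -- 2 masters, 3 slaves.
    // After: the same topology, spelled out through StartTestingClusterOption.
    StartTestingClusterOption option = StartTestingClusterOption.builder()
      .numMasters(2)        // was the numMasters argument
      .numRegionServers(3)  // was the numSlaves argument
      .numDataNodes(3)      // numSlaves also sized the HDFS datanode count
      .build();
    SingleProcessHBaseCluster cluster = TEST_UTIL.startMiniCluster(option);
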
   /**
    * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number. All
    * other options will use default values, defined in {@link StartTestingClusterOption.Builder}.
@@ -1033,13 +743,13 @@ public SingleProcessHBaseCluster startMiniCluster(int numMasters, int numRegionS
    */
   public SingleProcessHBaseCluster startMiniCluster(int numSlaves) throws Exception {
     StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
+      .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
     return startMiniCluster(option);
   }
 
   /**
-   * Start up a minicluster of hbase, dfs and zookeeper all using default options.
-   * Option default value can be found in {@link StartTestingClusterOption.Builder}.
+   * Start up a minicluster of hbase, dfs and zookeeper all using default options. Option default
+   * value can be found in {@link StartTestingClusterOption.Builder}.
    * @see #startMiniCluster(StartTestingClusterOption option)
    * @see #shutdownMiniDFSCluster()
    */
@@ -1048,12 +758,13 @@ public SingleProcessHBaseCluster startMiniCluster() throws Exception {
   }
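
A typical JUnit harness around the no-arg variant, sketched under the same TEST_UTIL assumption as
above:

    private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
      TEST_UTIL.startMiniCluster(); // hbase, dfs and zookeeper, all default options
    }

    @AfterClass
    public static void tearDownAfterClass() throws Exception {
      TEST_UTIL.shutdownMiniCluster();
    }
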
 
   /**
-   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
-   * It modifies Configuration.  It homes the cluster data directory under a random
-   * subdirectory in a directory under System property test.build.data, to be cleaned up on exit.
+   * Start up a mini cluster of hbase, and optionally dfs and zookeeper if needed. It modifies the
+   * Configuration. It homes the cluster data directory under a random subdirectory in a directory
+   * under the System property test.build.data, to be cleaned up on exit.
    * @see #shutdownMiniDFSCluster()
    */
-  public SingleProcessHBaseCluster startMiniCluster(StartTestingClusterOption option) throws Exception {
+  public SingleProcessHBaseCluster startMiniCluster(StartTestingClusterOption option)
+    throws Exception {
     LOG.info("Starting up minicluster with option: {}", option);
 
     // If we already put up a cluster, fail.
@@ -1129,7 +840,6 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption
       }
     }
 
-
     getAdmin(); // create immediately the hbaseAdmin
     LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());
 
@@ -1137,12 +847,13 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption
   }
 
   /**
-   * Starts up mini hbase cluster using default options.
-   * Default options can be found in {@link StartTestingClusterOption.Builder}.
+   * Starts up mini hbase cluster using default options. Default options can be found in
+   * {@link StartTestingClusterOption.Builder}.
    * @see #startMiniHBaseCluster(StartTestingClusterOption)
    * @see #shutdownMiniHBaseCluster()
    */
-  public SingleProcessHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
+  public SingleProcessHBaseCluster startMiniHBaseCluster()
+    throws IOException, InterruptedException {
     return startMiniHBaseCluster(StartTestingClusterOption.builder().build());
   }
 
@@ -1161,9 +872,9 @@ public SingleProcessHBaseCluster startMiniHBaseCluster() throws IOException, Int
    */
   @Deprecated
   public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
-      throws IOException, InterruptedException {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).numRegionServers(numRegionServers).build();
+    throws IOException, InterruptedException {
+    StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters)
+      .numRegionServers(numRegionServers).build();
     return startMiniHBaseCluster(option);
   }
 
@@ -1183,9 +894,9 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRe
    */
   @Deprecated
   public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
-      List rsPorts) throws IOException, InterruptedException {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
+    List<Integer> rsPorts) throws IOException, InterruptedException {
+    StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters)
+      .numRegionServers(numRegionServers).rsPorts(rsPorts).build();
     return startMiniHBaseCluster(option);
   }
 
@@ -1209,19 +920,18 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRe
    */
   @Deprecated
   public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
-      List rsPorts, Class masterClass,
-      Class rsClass,
-      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
-    StartTestingClusterOption option = StartTestingClusterOption.builder()
-        .numMasters(numMasters).masterClass(masterClass)
-        .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
-        .createRootDir(createRootDir).createWALDir(createWALDir).build();
+    List<Integer> rsPorts, Class<? extends HMaster> masterClass,
+    Class<? extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
+    boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
+    StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters)
+      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
+      .createRootDir(createRootDir).createWALDir(createWALDir).build();
     return startMiniHBaseCluster(option);
   }
 
   /**
-   * Starts the hbase cluster up again after shutting it down previously in a
-   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
+   * Starts the hbase cluster up again after shutting it down previously in a test. Use this if you
+   * want to keep dfs/zk up and just stop/start hbase.
    * @param servers number of region servers
    */
   public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
@@ -1229,9 +939,9 @@ public void restartHBaseCluster(int servers) throws IOException, InterruptedExce
   }
 
  public void restartHBaseCluster(int servers, List<Integer> ports)
-      throws IOException, InterruptedException {
+    throws IOException, InterruptedException {
     StartTestingClusterOption option =
-        StartTestingClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
+      StartTestingClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
     restartHBaseCluster(option);
     invalidateConnection();
   }
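
Shutdown and restart pair up so that dfs/zk stay running while only hbase is bounced, as the
javadoc above describes; a hedged sketch under the same TEST_UTIL assumption:

    TEST_UTIL.shutdownMiniHBaseCluster(); // hbase goes down; dfs and zookeeper keep running
    TEST_UTIL.restartHBaseCluster(1);     // bring hbase back with a single region server
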
@@ -1256,16 +966,16 @@ public void restartHBaseCluster(StartTestingClusterOption option)
   }
 
   /**
-   * @return Current mini hbase cluster. Only has something in it after a call
-   * to {@link #startMiniCluster()}.
+   * Returns current mini hbase cluster. Only has something in it after a call to
+   * {@link #startMiniCluster()}.
    * @see #startMiniCluster()
    */
   public SingleProcessHBaseCluster getMiniHBaseCluster() {
     if (this.hbaseCluster == null || this.hbaseCluster instanceof SingleProcessHBaseCluster) {
-      return (SingleProcessHBaseCluster)this.hbaseCluster;
+      return (SingleProcessHBaseCluster) this.hbaseCluster;
     }
-    throw new RuntimeException(hbaseCluster + " not an instance of " +
-                               SingleProcessHBaseCluster.class.getName());
+    throw new RuntimeException(
+      hbaseCluster + " not an instance of " + SingleProcessHBaseCluster.class.getName());
   }
 
   /**
@@ -1326,10 +1036,9 @@ private void cleanup() throws IOException {
   }
 
   /**
-   * Returns the path to the default root dir the minicluster uses. If create
-   * is true, a new root directory path is fetched irrespective of whether it has been fetched
-   * before or not. If false, previous path is used.
-   * Note: this does not cause the root dir to be created.
+   * Returns the path to the default root dir the minicluster uses. If create is true, a new root
+   * directory path is fetched irrespective of whether it has been fetched before or not. If false,
+   * the previous path is used. Note: this does not cause the root dir to be created.
    * @return Fully qualified path for the default hbase root dir
    * @throws IOException
    */
@@ -1342,9 +1051,8 @@ public Path getDefaultRootDirPath(boolean create) throws IOException {
   }
 
   /**
-   * Same as {{@link HBaseTestingUtil#getDefaultRootDirPath(boolean create)}
-   * except that create flag is false.
-   * Note: this does not cause the root dir to be created.
+   * Same as {@link HBaseTestingUtil#getDefaultRootDirPath(boolean create)} except that the
+   * create flag is false. Note: this does not cause the root dir to be created.
    * @return Fully qualified path for the default hbase root dir
    * @throws IOException
    */
@@ -1353,14 +1061,12 @@ public Path getDefaultRootDirPath() throws IOException {
   }
 
   /**
-   * Creates an hbase rootdir in user home directory.  Also creates hbase
-   * version file.  Normally you won't make use of this method.  Root hbasedir
-   * is created for you as part of mini cluster startup.  You'd only use this
-   * method if you were doing manual operation.
-   * @param create This flag decides whether to get a new
-   * root or data directory path or not, if it has been fetched already.
-   * Note : Directory will be made irrespective of whether path has been fetched or not.
-   * If directory already exists, it will be overwritten
+   * Creates an hbase rootdir in the user's home directory. Also creates the hbase version file.
+   * Normally you won't make use of this method; the root hbasedir is created for you as part of
+   * mini cluster startup. You'd only use this method if you were performing manual operations.
+   * @param create This flag decides whether to get a new root or data directory path or not, if it
+   *          has been fetched already. Note: the directory will be made irrespective of whether
+   *          the path has been fetched or not. If the directory already exists, it will be
+   *          overwritten.
    * @return Fully qualified path to hbase root dir
    * @throws IOException
    */
@@ -1374,8 +1080,8 @@ public Path createRootDir(boolean create) throws IOException {
   }
 
   /**
-   * Same as {@link HBaseTestingUtil#createRootDir(boolean create)}
-   * except that create flag is false.
+   * Same as {@link HBaseTestingUtil#createRootDir(boolean create)} except that the create
+   * flag is false.
    * @return Fully qualified path to hbase root dir
    * @throws IOException
    */
@@ -1384,14 +1090,12 @@ public Path createRootDir() throws IOException {
   }
 
   /**
-   * Creates a hbase walDir in the user's home directory.
-   * Normally you won't make use of this method. Root hbaseWALDir
-   * is created for you as part of mini cluster startup. You'd only use this
-   * method if you were doing manual operation.
-   *
+   * Creates an hbase walDir in the user's home directory. Normally you won't make use of this
+   * method; the root hbaseWALDir is created for you as part of mini cluster startup. You'd only
+   * use this method if you were performing manual operations.
    * @return Fully qualified path to hbase WAL root dir
    * @throws IOException
-  */
+   */
   public Path createWALRootDir() throws IOException {
     FileSystem fs = FileSystem.get(this.conf);
     Path walDir = getNewDataTestDirOnTestFS();
@@ -1403,7 +1107,7 @@ public Path createWALRootDir() throws IOException {
   private void setHBaseFsTmpDir() throws IOException {
     String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
     if (hbaseFsTmpDirInString == null) {
-      this.conf.set("hbase.fs.tmp.dir",  getDataTestDirOnTestFS("hbase-staging").toString());
+      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
       LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
     } else {
       LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
@@ -1449,9 +1153,8 @@ public void compact(TableName tableName, boolean major) throws IOException {
    * @return A Table instance for the created table.
    * @throws IOException
    */
-  public Table createTable(TableName tableName, String family)
-  throws IOException{
-    return createTable(tableName, new String[]{family});
+  public Table createTable(TableName tableName, String family) throws IOException {
+    return createTable(tableName, new String[] { family });
   }
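
A small usage sketch for the single-family overload; the table, family, and cell names are
illustrative:

    try (Table t = TEST_UTIL.createTable(TableName.valueOf("demo"), "f")) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      t.put(put);
    }
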
 
   /**
@@ -1461,8 +1164,7 @@ public Table createTable(TableName tableName, String family)
    * @return A Table instance for the created table.
    * @throws IOException
    */
-  public Table createTable(TableName tableName, String[] families)
-  throws IOException {
+  public Table createTable(TableName tableName, String[] families) throws IOException {
     List<byte[]> fams = new ArrayList<>(families.length);
     for (String family : families) {
       fams.add(Bytes.toBytes(family));
@@ -1477,9 +1179,8 @@ public Table createTable(TableName tableName, String[] families)
    * @return A Table instance for the created table.
    * @throws IOException
    */
-  public Table createTable(TableName tableName, byte[] family)
-  throws IOException{
-    return createTable(tableName, new byte[][]{family});
+  public Table createTable(TableName tableName, byte[] family) throws IOException {
+    return createTable(tableName, new byte[][] { family });
   }
 
   /**
@@ -1491,7 +1192,7 @@ public Table createTable(TableName tableName, byte[] family)
    * @throws IOException
    */
   public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
-      throws IOException {
+    throws IOException {
     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
     byte[] startKey = Bytes.toBytes("aaaaa");
     byte[] endKey = Bytes.toBytes("zzzzz");
@@ -1507,8 +1208,7 @@ public Table createMultiRegionTable(TableName tableName, byte[] family, int numR
    * @return A Table instance for the created table.
    * @throws IOException
    */
-  public Table createTable(TableName tableName, byte[][] families)
-  throws IOException {
+  public Table createTable(TableName tableName, byte[][] families) throws IOException {
     return createTable(tableName, families, (byte[][]) null);
   }
 
@@ -1545,7 +1245,7 @@ public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[
    * @throws IOException
    */
   public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
-      throws IOException {
+    throws IOException {
     return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
   }
 
@@ -1559,7 +1259,7 @@ public Table createTable(TableName tableName, byte[][] families, byte[][] splitK
    * @throws IOException throws IOException
    */
   public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
-      int replicaCount) throws IOException {
+    int replicaCount) throws IOException {
     return createTable(tableName, families, splitKeys, replicaCount,
       new Configuration(getConfiguration()));
   }
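
Creating a pre-split table through the splitKeys overload; the keys below are illustrative:

    byte[][] families = { Bytes.toBytes("f") };
    byte[][] splitKeys = { Bytes.toBytes("m") }; // yields two regions: [start, m) and [m, end)
    Table t = TEST_UTIL.createTable(TableName.valueOf("presplit"), families, splitKeys);
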
@@ -1595,7 +1295,7 @@ public Table createTable(TableDescriptor htd, byte[][] families, Configuration c
    * @throws IOException if getAdmin or createTable fails
    */
   public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
-      Configuration c) throws IOException {
+    Configuration c) throws IOException {
     // Disable blooms (they are on by default as of 0.95) but we disable them here because
     // tests have hard coded counts of what to expect in block cache, etc., and blooms being
     // on is interfering.
@@ -1615,14 +1315,13 @@ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitK
    */
 
   public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
-      BloomType type, int blockSize, Configuration c) throws IOException {
+    BloomType type, int blockSize, Configuration c) throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
     for (byte[] family : families) {
       ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
-        .setBloomFilterType(type)
-        .setBlocksize(blockSize);
+        .setBloomFilterType(type).setBlocksize(blockSize);
       if (isNewVersionBehaviorEnabled()) {
-          cfdb.setNewVersionBehavior(true);
+        cfdb.setNewVersionBehavior(true);
       }
       builder.setColumnFamily(cfdb.build());
     }
@@ -1645,13 +1344,12 @@ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitK
    * @return A Table instance for the created table.
    * @throws IOException
    */
-  public Table createTable(TableDescriptor htd, byte[][] splitRows)
-      throws IOException {
+  public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
     if (isNewVersionBehaviorEnabled()) {
       for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
-         builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
-           .setNewVersionBehavior(true).build());
+        builder.setColumnFamily(
+          ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
       }
     }
     if (splitRows != null) {
@@ -1694,7 +1392,7 @@ public Table createTable(TableName tableName, byte[] family, int numVersions) th
    * @return A Table instance for the created table.
    */
   public Table createTable(TableName tableName, byte[][] families, int numVersions)
-      throws IOException {
+    throws IOException {
     return createTable(tableName, families, numVersions, (byte[][]) null);
   }
 
@@ -1703,11 +1401,11 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions
    * @return A Table instance for the created table.
    */
   public Table createTable(TableName tableName, byte[][] families, int numVersions,
-      byte[][] splitKeys) throws IOException {
+    byte[][] splitKeys) throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
     for (byte[] family : families) {
-      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
-        .setMaxVersions(numVersions);
+      ColumnFamilyDescriptorBuilder cfBuilder =
+        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions);
       if (isNewVersionBehaviorEnabled()) {
         cfBuilder.setNewVersionBehavior(true);
       }
@@ -1729,7 +1427,7 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions
    * @return A Table instance for the created table.
    */
   public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
-      throws IOException {
+    throws IOException {
     return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
   }
 
@@ -1737,8 +1435,8 @@ public Table createMultiRegionTable(TableName tableName, byte[][] families, int
    * Create a table.
    * @return A Table instance for the created table.
    */
-  public Table createTable(TableName tableName, byte[][] families,
-    int numVersions, int blockSize) throws IOException {
+  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize)
+    throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
     for (byte[] family : families) {
       ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
@@ -1755,8 +1453,8 @@ public Table createTable(TableName tableName, byte[][] families,
     return getConnection().getTable(tableName);
   }
 
-  public Table createTable(TableName tableName, byte[][] families,
-      int numVersions, int blockSize, String cpName) throws IOException {
+  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize,
+    String cpName) throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
     for (byte[] family : families) {
       ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
@@ -1827,19 +1525,6 @@ public Table createMultiRegionTable(TableName tableName, byte[] family) throws I
     return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
   }
 
-  /**
-   * Modify a table, synchronous.
-   * @deprecated since 3.0.0 and will be removed in 4.0.0. Just use
-   *   {@link Admin#modifyTable(TableDescriptor)} directly as it is synchronous now.
-   * @see Admin#modifyTable(TableDescriptor)
-   * @see HBASE-22002
-   */
-  @Deprecated
-  public static void modifyTableSync(Admin admin, TableDescriptor desc)
-      throws IOException, InterruptedException {
-    admin.modifyTable(desc);
-  }
-
   /**
    * Set the number of Region replicas.
    */
@@ -1878,15 +1563,15 @@ public void deleteTableIfAny(TableName tableName) throws IOException {
   // ==========================================================================
   // Canned table and table descriptor creation
 
-  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
-  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
-  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
-  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
+  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
+  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
+  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
+  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
   private static final int MAXVERSIONS = 3;
 
   public static final char FIRST_CHAR = 'a';
   public static final char LAST_CHAR = 'z';
-  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
+  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
 
   public TableDescriptorBuilder createModifyableTableDescriptor(final String name) {
@@ -1985,7 +1670,7 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws
    * @throws IOException
    */
   public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
-      WAL wal) throws IOException {
+    WAL wal) throws IOException {
     return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
   }
 
@@ -1995,15 +1680,15 @@ public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDesc
    * @param stopKey
    * @param isReadOnly
    * @param families
-   * @return A region on which you must call
-   * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done.
+   * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)}
+   *         when done.
    * @throws IOException
    */
   public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
-      Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
-      throws IOException {
+    Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
+    throws IOException {
     return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
-        durability, wal, null, families);
+      durability, wal, null, families);
   }
 
   public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
@@ -2035,8 +1720,8 @@ public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] s
   // ==========================================================================
 
   /**
-   * Provide an existing table name to truncate.
-   * Scans the table and issues a delete for each row read.
+   * Provide an existing table name to truncate. Scans the table and issues a delete for each row
+   * read.
    * @param tableName existing table
    * @return Table to that table
    * @throws IOException
@@ -2045,7 +1730,7 @@ public Table deleteTableData(TableName tableName) throws IOException {
     Table table = getConnection().getTable(tableName);
     Scan scan = new Scan();
     ResultScanner resScan = table.getScanner(scan);
-    for(Result res : resScan) {
+    for (Result res : resScan) {
       Delete del = new Delete(res.getRow());
       table.delete(del);
     }
@@ -2055,14 +1740,14 @@ public Table deleteTableData(TableName tableName) throws IOException {
   }
 
   /**
-   * Truncate a table using the admin command.
-   * Effectively disables, deletes, and recreates the table.
+   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
+   * table.
    * @param tableName table which must exist.
    * @param preserveRegions keep the existing split points
    * @return Table for the new table
    */
-  public Table truncateTable(final TableName tableName, final boolean preserveRegions) throws
-      IOException {
+  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
+    throws IOException {
     Admin admin = getAdmin();
     if (!admin.isTableDisabled(tableName)) {
       admin.disableTable(tableName);
@@ -2072,11 +1757,9 @@ public Table truncateTable(final TableName tableName, final boolean preserveRegi
   }
 
   /**
-   * Truncate a table using the admin command.
-   * Effectively disables, deletes, and recreates the table.
-   * For previous behavior of issuing row deletes, see
-   * deleteTableData.
-   * Expressly does not preserve regions of existing table.
+   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
+   * table. For previous behavior of issuing row deletes, see deleteTableData. Expressly does not
+   * preserve regions of the existing table.
    * @param tableName table which must exist.
    * @return Table for the new table
    */
@@ -2092,7 +1775,7 @@ public Table truncateTable(final TableName tableName) throws IOException {
    * @throws IOException
    */
   public int loadTable(final Table t, final byte[] f) throws IOException {
-    return loadTable(t, new byte[][] {f});
+    return loadTable(t, new byte[][] { f });
   }
 
   /**
@@ -2103,7 +1786,7 @@ public int loadTable(final Table t, final byte[] f) throws IOException {
    * @throws IOException
    */
   public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
-    return loadTable(t, new byte[][] {f}, null, writeToWAL);
+    return loadTable(t, new byte[][] { f }, null, writeToWAL);
   }
 
   /**
@@ -2137,8 +1820,8 @@ public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOExc
    * @return Count of rows loaded.
    * @throws IOException
    */
-  public int loadTable(final Table t, final byte[][] f, byte[] value,
-      boolean writeToWAL) throws IOException {
+  public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
+    throws IOException {
     List<Put> puts = new ArrayList<>();
     for (byte[] row : HBaseTestingUtil.ROWS) {
       Put put = new Put(row);
@@ -2153,12 +1836,13 @@ public int loadTable(final Table t, final byte[][] f, byte[] value,
     return puts.size();
   }
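
loadTable writes one row for every three-letter key from 'aaa' through 'zzz' (the ROWS array); a
round-trip sketch pairing it with the static countRows further down (table name assumed):

    try (Table t = TEST_UTIL.createTable(TableName.valueOf("loaded"), "f")) {
      int loaded = TEST_UTIL.loadTable(t, Bytes.toBytes("f"));
      assertEquals(loaded, HBaseTestingUtil.countRows(t)); // every Put lands exactly once
    }
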
 
-  /** A tracker for tracking and validating table rows
-   * generated with {@link HBaseTestingUtil#loadTable(Table, byte[])}
+  /**
+   * A tracker for tracking and validating table rows generated with
+   * {@link HBaseTestingUtil#loadTable(Table, byte[])}
    */
   public static class SeenRowTracker {
     int dim = 'z' - 'a' + 1;
-    int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
+    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
     byte[] startRow;
     byte[] stopRow;
 
@@ -2181,8 +1865,9 @@ public void addRow(byte[] row) {
       seenRows[i(row[0])][i(row[1])][i(row[2])]++;
     }
 
-    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
-     * all other rows none
+    /**
+     * Validate that all the rows between startRow and stopRow are seen exactly once, and all other
+     * rows none
      */
     public void validate() {
       for (byte b1 = 'a'; b1 <= 'z'; b1++) {
@@ -2190,14 +1875,14 @@ public void validate() {
           for (byte b3 = 'a'; b3 <= 'z'; b3++) {
             int count = seenRows[i(b1)][i(b2)][i(b3)];
             int expectedCount = 0;
-            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
-                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
+            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0 &&
+              Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
               expectedCount = 1;
             }
             if (count != expectedCount) {
-              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
+              String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
-                  "instead of " + expectedCount);
+                "instead of " + expectedCount);
             }
           }
         }
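
A sketch of the tracker in use against a table populated by loadTable; the (startRow, stopRow)
constructor is assumed from the fields above:

    SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
    try (ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        tracker.addRow(result.getRow());
      }
    }
    tracker.validate(); // throws RuntimeException if any row was missed or seen twice
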
@@ -2210,7 +1895,7 @@ public int loadRegion(final HRegion r, final byte[] f) throws IOException {
   }
 
   public int loadRegion(final Region r, final byte[] f) throws IOException {
-    return loadRegion((HRegion)r, f);
+    return loadRegion((HRegion) r, f);
   }
 
   /**
@@ -2221,8 +1906,7 @@ public int loadRegion(final Region r, final byte[] f) throws IOException {
    * @return Count of rows loaded.
    * @throws IOException
    */
-  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
-  throws IOException {
+  public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
     byte[] k = new byte[3];
     int rowCount = 0;
     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
@@ -2259,7 +1943,7 @@ public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
   }
 
   public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
-      throws IOException {
+    throws IOException {
     for (int i = startRow; i < endRow; i++) {
       byte[] data = Bytes.toBytes(String.valueOf(i));
       Put put = new Put(data);
@@ -2269,20 +1953,19 @@ public void loadNumericRows(final Table t, final byte[] f, int startRow, int end
   }
 
   public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
-      throws IOException {
+    throws IOException {
     Random r = new Random();
     byte[] row = new byte[rowSize];
     for (int i = 0; i < totalRows; i++) {
       r.nextBytes(row);
       Put put = new Put(row);
-      put.addColumn(f, new byte[]{0}, new byte[]{0});
+      put.addColumn(f, new byte[] { 0 }, new byte[] { 0 });
       t.put(put);
     }
   }
 
   public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
-      int replicaId)
-      throws IOException {
+    int replicaId) throws IOException {
     for (int i = startRow; i < endRow; i++) {
       String failMsg = "Failed verification of row :" + i;
       byte[] data = Bytes.toBytes(String.valueOf(i));
@@ -2293,29 +1976,28 @@ public void verifyNumericRows(Table table, final byte[] f, int startRow, int end
       assertTrue(failMsg, result.containsColumn(f, null));
       assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
       Cell cell = result.getColumnLatestCell(f, null);
-      assertTrue(failMsg,
-        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
-          cell.getValueLength()));
+      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
+        cell.getValueOffset(), cell.getValueLength()));
     }
   }
 
   public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
-      throws IOException {
-    verifyNumericRows((HRegion)region, f, startRow, endRow);
+    throws IOException {
+    verifyNumericRows((HRegion) region, f, startRow, endRow);
   }
 
   public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
-      throws IOException {
+    throws IOException {
     verifyNumericRows(region, f, startRow, endRow, true);
   }
 
   public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
-      final boolean present) throws IOException {
-    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
+    final boolean present) throws IOException {
+    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
   }
 
   public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
-      final boolean present) throws IOException {
+    final boolean present) throws IOException {
     for (int i = startRow; i < endRow; i++) {
       String failMsg = "Failed verification of row :" + i;
       byte[] data = Bytes.toBytes(String.valueOf(i));
@@ -2328,14 +2010,13 @@ public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int
       assertTrue(failMsg, result.containsColumn(f, null));
       assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
       Cell cell = result.getColumnLatestCell(f, null);
-      assertTrue(failMsg,
-        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
-          cell.getValueLength()));
+      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
+        cell.getValueOffset(), cell.getValueLength()));
     }
   }
 
   public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
-      throws IOException {
+    throws IOException {
     for (int i = startRow; i < endRow; i++) {
       byte[] data = Bytes.toBytes(String.valueOf(i));
       Delete delete = new Delete(data);
@@ -2363,9 +2044,9 @@ public static int countRows(final Table table, final Scan scan) throws IOExcepti
     }
   }
 
-  public int countRows(final Table table, final byte[]... families) throws IOException {
+  public static int countRows(final Table table, final byte[]... families) throws IOException {
     Scan scan = new Scan();
-    for (byte[] family: families) {
+    for (byte[] family : families) {
       scan.addFamily(family);
     }
     return countRows(table, scan);
@@ -2375,28 +2056,22 @@ public int countRows(final Table table, final byte[]... families) throws IOExcep
    * Return the number of rows in the given table.
    */
   public int countRows(final TableName tableName) throws IOException {
-    Table table = getConnection().getTable(tableName);
-    try {
+    try (Table table = getConnection().getTable(tableName)) {
       return countRows(table);
-    } finally {
-      table.close();
     }
   }
 
-  public int countRows(final Region region) throws IOException {
+  public static int countRows(final Region region) throws IOException {
     return countRows(region, new Scan());
   }
 
-  public int countRows(final Region region, final Scan scan) throws IOException {
-    InternalScanner scanner = region.getScanner(scan);
-    try {
+  public static int countRows(final Region region, final Scan scan) throws IOException {
+    try (InternalScanner scanner = region.getScanner(scan)) {
       return countRows(scanner);
-    } finally {
-      scanner.close();
     }
   }
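
The now-static counters need no utility instance; a sketch restricting the count to one family
(names illustrative):

    Scan scan = new Scan().addFamily(Bytes.toBytes("f"));
    int n = HBaseTestingUtil.countRows(table, scan);
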
 
-  public int countRows(final InternalScanner scanner) throws IOException {
+  public static int countRows(final InternalScanner scanner) throws IOException {
     int scannedCount = 0;
     List<Cell> results = new ArrayList<>();
     boolean hasMore = true;
@@ -2412,14 +2087,12 @@ public int countRows(final InternalScanner scanner) throws IOException {
    * Return an md5 digest of the entire contents of a table.
    */
   public String checksumRows(final Table table) throws Exception {
-
-    Scan scan = new Scan();
-    ResultScanner results = table.getScanner(scan);
     MessageDigest digest = MessageDigest.getInstance("MD5");
-    for (Result res : results) {
-      digest.update(res.getRow());
+    try (ResultScanner results = table.getScanner(new Scan())) {
+      for (Result res : results) {
+        digest.update(res.getRow());
+      }
     }
-    results.close();
     // MessageDigest#toString() does not render the hash; emit the actual digest bytes as hex.
     return Bytes.toHex(digest.digest());
   }
 
@@ -2439,68 +2112,51 @@ public String checksumRows(final Table table) throws Exception {
     }
   }
 
-  public static final byte[][] KEYS = {
-    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
-    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
-    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
-    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
-    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
-    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
-    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
-    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
-    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
-  };
-
-  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
-      Bytes.toBytes("bbb"),
-      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
-      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
-      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
-      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
-      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
-      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
-      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
-      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
-  };
-
-  /**
-   * Create rows in hbase:meta for regions of the specified table with the specified
-   * start keys.  The first startKey should be a 0 length byte array if you
-   * want to form a proper range of regions.
-   * @param conf
-   * @param htd
-   * @param startKeys
+  public static final byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
+    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
+    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
+    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
+    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
+    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
+    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") };
+
+  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = { Bytes.toBytes("bbb"),
+    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
+    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
+    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
+    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
+    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
+    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") };
+
+  /**
+   * Create rows in hbase:meta for regions of the specified table with the specified start keys.
+   * The first startKey should be a zero-length byte array if you want to form a proper range of
+   * regions.
    * @return list of region info for regions added to meta
-   * @throws IOException
    */
  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
-      final TableDescriptor htd, byte [][] startKeys)
-  throws IOException {
-    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
-    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
-    List newRegions = new ArrayList<>(startKeys.length);
-    MetaTableAccessor
-        .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
-    // add custom ones
-    for (int i = 0; i < startKeys.length; i++) {
-      int j = (i + 1) % startKeys.length;
-      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
-          .setStartKey(startKeys[i])
-          .setEndKey(startKeys[j])
-          .build();
-      MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
-      newRegions.add(hri);
+    final TableDescriptor htd, byte[][] startKeys) throws IOException {
+    try (Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
+      Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
+      List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
+      MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
+        TableState.State.ENABLED);
+      // add custom ones
+      for (int i = 0; i < startKeys.length; i++) {
+        int j = (i + 1) % startKeys.length;
+        RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i])
+          .setEndKey(startKeys[j]).build();
+        MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
+        newRegions.add(hri);
+      }
+      return newRegions;
     }
-
-    meta.close();
-    return newRegions;
   }
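
KEYS, defined just above, begins with HConstants.EMPTY_BYTE_ARRAY and therefore forms the proper
region range the javadoc asks for; a hedged sketch:

    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("regions"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build();
    List<RegionInfo> created =
      TEST_UTIL.createMultiRegionsInMeta(TEST_UTIL.getConfiguration(), htd, HBaseTestingUtil.KEYS);
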
 
   /**
    * Create an unmanaged WAL. Be sure to close it when you're through.
    */
   public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
-      throws IOException {
+    throws IOException {
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
     // unless I pass along via the conf.
     Configuration confForWAL = new Configuration(conf);
@@ -2508,13 +2164,12 @@ public static WAL createWal(final Configuration conf, final Path rootDir, final
     return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
   }
 
-
   /**
    * Create a region with its own WAL. Be sure to call
    * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
    */
   public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
-      final Configuration conf, final TableDescriptor htd) throws IOException {
+    final Configuration conf, final TableDescriptor htd) throws IOException {
     return createRegionAndWAL(info, rootDir, conf, htd, true);
   }
 
@@ -2523,20 +2178,20 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD
    * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
    */
   public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
-      final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
-      throws IOException {
+    final Configuration conf, final TableDescriptor htd, BlockCache blockCache) throws IOException {
     HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
     region.setBlockCache(blockCache);
     region.initialize();
     return region;
   }
+
   /**
    * Create a region with its own WAL. Be sure to call
    * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
    */
   public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
-      final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
-      throws IOException {
+    final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
+    throws IOException {
     HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
     region.setMobFileCache(mobFileCache);
     region.initialize();
@@ -2548,84 +2203,19 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD
    * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
    */
   public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
-      final Configuration conf, final TableDescriptor htd, boolean initialize)
-      throws IOException {
-    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0,
-      0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
+    final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException {
+    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
+      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
     WAL wal = createWal(conf, rootDir, info);
     return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
   }
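
Pairing createRegionAndWAL with the closeRegionAndWAL cleanup the javadocs insist on; a minimal
sketch, with htd assumed to be an existing TableDescriptor:

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
    try {
      // ... exercise the region ...
    } finally {
      HBaseTestingUtil.closeRegionAndWAL(region); // closes both the region and its WAL
    }
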
 
   /**
-   * Returns all rows from the hbase:meta table.
-   *
-   * @throws IOException When reading the rows fails.
-   */
-  public List getMetaTableRows() throws IOException {
-    // TODO: Redo using MetaTableAccessor class
-    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
-    List rows = new ArrayList<>();
-    ResultScanner s = t.getScanner(new Scan());
-    for (Result result : s) {
-      LOG.info("getMetaTableRows: row -> " +
-        Bytes.toStringBinary(result.getRow()));
-      rows.add(result.getRow());
-    }
-    s.close();
-    t.close();
-    return rows;
-  }
-
-  /**
-   * Returns all rows from the hbase:meta table for a given user table
-   *
-   * @throws IOException When reading the rows fails.
-   */
-  public List getMetaTableRows(TableName tableName) throws IOException {
-    // TODO: Redo using MetaTableAccessor.
-    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
-    List rows = new ArrayList<>();
-    ResultScanner s = t.getScanner(new Scan());
-    for (Result result : s) {
-      RegionInfo info = CatalogFamilyFormat.getRegionInfo(result);
-      if (info == null) {
-        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
-        // TODO figure out what to do for this new hosed case.
-        continue;
-      }
-
-      if (info.getTable().equals(tableName)) {
-        LOG.info("getMetaTableRows: row -> " +
-            Bytes.toStringBinary(result.getRow()) + info);
-        rows.add(result.getRow());
-      }
-    }
-    s.close();
-    t.close();
-    return rows;
-  }
-
-  /**
-   * Returns all regions of the specified table
-   *
-   * @param tableName the table name
-   * @return all regions of the specified table
-   * @throws IOException when getting the regions fails.
-   */
-  private List getRegions(TableName tableName) throws IOException {
-    try (Admin admin = getConnection().getAdmin()) {
-      return admin.getRegions(tableName);
-    }
-  }
-
-  /*
+  /**
    * Find any other region server which is different from the one identified by parameter
-   * @param rs
    * @return another region server
    */
   public HRegionServer getOtherRegionServer(HRegionServer rs) {
-    for (JVMClusterUtil.RegionServerThread rst :
-      getMiniHBaseCluster().getRegionServerThreads()) {
+    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
      if (rst.getRegionServer() != rs) {
         return rst.getRegionServer();
       }
@@ -2634,26 +2224,21 @@ public HRegionServer getOtherRegionServer(HRegionServer rs) {
   }
 
   /**
-   * Tool to get the reference to the region server object that holds the
-   * region of the specified user table.
+   * Tool to get the reference to the region server object that holds the region of the specified
+   * user table.
    * @param tableName user table to lookup in hbase:meta
    * @return region server that holds it, null if the row doesn't exist
-   * @throws IOException
-   * @throws InterruptedException
    */
   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
-      throws IOException, InterruptedException {
-    List regions = getRegions(tableName);
+    throws IOException, InterruptedException {
+    List<RegionInfo> regions = getAdmin().getRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       return null;
     }
-    LOG.debug("Found " + regions.size() + " regions for table " +
-        tableName);
+    LOG.debug("Found " + regions.size() + " regions for table " + tableName);
 
-    byte[] firstRegionName = regions.stream()
-        .filter(r -> !r.isOffline())
-        .map(RegionInfo::getRegionName)
-        .findFirst()
+    byte[] firstRegionName =
+      regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst()
         .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
 
     LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
@@ -2661,36 +2246,33 @@ public HRegionServer getRSForFirstRegionInTable(TableName tableName)
       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-    RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
-    while(retrier.shouldRetry()) {
+    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
+    while (retrier.shouldRetry()) {
       int index = getMiniHBaseCluster().getServerWith(firstRegionName);
       if (index != -1) {
         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
       }
-      // Came back -1.  Region may not be online yet.  Sleep a while.
+      // Came back -1. Region may not be online yet. Sleep a while.
       retrier.sleepUntilNextRetry();
     }
     return null;
   }
 
   /**
-   * Starts a MiniMRCluster with a default number of
-   * TaskTracker's.
-   *
+   * Starts a MiniMRCluster with a default number of TaskTrackers.
    * @throws IOException When starting the cluster fails.
    */
   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
     // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
-    conf.setIfUnset(
-        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
-        "99.0");
+    conf.setIfUnset("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
+      "99.0");
     startMiniMapReduceCluster(2);
     return mrCluster;
   }
 
   /**
-   * Tasktracker has a bug where changing the hadoop.log.dir system property
-   * will not change its internal static LOG_DIR variable.
+   * Tasktracker has a bug where changing the hadoop.log.dir system property will not change its
+   * internal static LOG_DIR variable.
    */
   private void forceChangeTaskLogDir() {
     Field logDirField;
@@ -2717,7 +2299,7 @@ private void forceChangeTaskLogDir() {
   /**
    * Starts a MiniMRCluster. Call {@link #setFileSystemURI(String)} to use a different
    * filesystem.
-   * @param servers  The number of TaskTracker's to start.
+   * @param servers The number of TaskTrackers to start.
    * @throws IOException When starting the cluster fails.
    */
   private void startMiniMapReduceCluster(final int servers) throws IOException {
@@ -2742,21 +2324,21 @@ private void startMiniMapReduceCluster(final int servers) throws IOException {
     ////
 
     // Allow the user to override FS URI for this map-reduce cluster to use.
-    mrCluster = new MiniMRCluster(servers,
-      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
-      null, null, new JobConf(this.conf));
+    mrCluster =
+      new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(),
+        1, null, null, new JobConf(this.conf));
     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
     if (jobConf == null) {
       jobConf = mrCluster.createJobConf();
     }
 
-    jobConf.set("mapreduce.cluster.local.dir",
-      conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
+    // Hadoop MiniMR overwrites this while it should not
+    jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
     LOG.info("Mini mapreduce cluster started");
 
     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
-    // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
-    // necessary config properties here.  YARN-129 required adding a few properties.
+    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
+    // necessary config properties here. YARN-129 required adding a few properties.
     conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
     // this for mrv2 support; mr1 ignores this
     conf.set("mapreduce.framework.name", "yarn");
@@ -2769,18 +2351,15 @@ private void startMiniMapReduceCluster(final int servers) throws IOException {
     if (historyAddress != null) {
       conf.set("mapreduce.jobhistory.address", historyAddress);
     }
-    String schedulerAddress =
-      jobConf.get("yarn.resourcemanager.scheduler.address");
+    String schedulerAddress = jobConf.get("yarn.resourcemanager.scheduler.address");
     if (schedulerAddress != null) {
       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
     }
-    String mrJobHistoryWebappAddress =
-      jobConf.get("mapreduce.jobhistory.webapp.address");
+    String mrJobHistoryWebappAddress = jobConf.get("mapreduce.jobhistory.webapp.address");
     if (mrJobHistoryWebappAddress != null) {
       conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
     }
-    String yarnRMWebappAddress =
-      jobConf.get("yarn.resourcemanager.webapp.address");
+    String yarnRMWebappAddress = jobConf.get("yarn.resourcemanager.webapp.address");
     if (yarnRMWebappAddress != null) {
       conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
     }
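A minimal usage sketch, not part of the patch: the YARN/MR addresses copied into the shared configuration above are what let a test-built `Job` land on the mini cluster. This assumes the public wrappers shown in this diff and is hedged accordingly:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.mapreduce.Job;

public class MiniMrExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();          // HDFS + ZK + HBase
    util.startMiniMapReduceCluster(); // YARN/MR, wired into util.getConfiguration()
    try {
      // Because the RM/scheduler/history-server addresses were copied into the
      // shared conf above, this job is submitted to the mini YARN cluster.
      Job job = Job.getInstance(util.getConfiguration(), "example-job");
      // ... set mapper/reducer/input/output, then: job.waitForCompletion(true);
    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
}
```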
@@ -2804,15 +2383,15 @@ public void shutdownMiniMapReduceCluster() {
    * Create a stubbed out RegionServerService, mainly for getting FS.
    */
   public RegionServerServices createMockRegionServerService() throws IOException {
-    return createMockRegionServerService((ServerName)null);
+    return createMockRegionServerService((ServerName) null);
   }
 
   /**
-   * Create a stubbed out RegionServerService, mainly for getting FS.
-   * This version is used by TestTokenAuthentication
+   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
+   * TestTokenAuthentication
    */
-  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws
-      IOException {
+  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
+    throws IOException {
     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
     rss.setFileSystem(getTestFileSystem());
     rss.setRpcServer(rpc);
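For illustration, a hedged sketch of how a unit test might consume one of these stubs; `testUtil` is an assumed `HBaseTestingUtil` with a running mini DFS:

```java
// Sketch: the stub supplies a real FileSystem and ZK watcher without a live
// region server process behind it (names here are assumptions).
RegionServerServices rss = testUtil.createMockRegionServerService();
FileSystem fs = rss.getFileSystem(); // the test filesystem set on the mock
// hand `rss` to the component under test that only needs FS/ZK plumbing
```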
@@ -2820,8 +2399,8 @@ public RegionServerServices createMockRegionServerService(RpcServerInterface rpc
   }
 
   /**
-   * Create a stubbed out RegionServerService, mainly for getting FS.
-   * This version is used by TestOpenRegionHandler
+   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
+   * TestOpenRegionHandler
    */
   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
@@ -2829,21 +2408,8 @@ public RegionServerServices createMockRegionServerService(ServerName name) throw
     return rss;
   }
 
-  /**
-   * Switches the logger for the given class to DEBUG level.
-   * @param clazz The class for which to switch to debug logging.
-   * @deprecated In 2.3.0, will be removed in 4.0.0. Only support changing log level on log4j now as
-   *             HBase only uses log4j. You should do this by your own as it you know which log
-   *             framework you are using then set the log level to debug is very easy.
-   */
-  @Deprecated
-  public void enableDebug(Class clazz) {
-    Log4jUtils.enableDebug(clazz);
-  }
-
   /**
    * Expire the Master's session
-   * @throws Exception
    */
   public void expireMasterSession() throws Exception {
     HMaster master = getMiniHBaseCluster().getMaster();
@@ -2872,31 +2438,29 @@ private void decrementMinRegionServerCount() {
   }
 
   private void decrementMinRegionServerCount(Configuration conf) {
-    int currentCount = conf.getInt(
-        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
+    int currentCount = conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
     if (currentCount != -1) {
-      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
-          Math.max(currentCount - 1, 1));
+      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, Math.max(currentCount - 1, 1));
     }
   }
 
   public void expireSession(ZKWatcher nodeZK) throws Exception {
-   expireSession(nodeZK, false);
+    expireSession(nodeZK, false);
   }
 
   /**
    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
    * http://hbase.apache.org/book.html#trouble.zookeeper
+   * <p>
    * There are issues when doing this:
-   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
-   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
-   *
+   * <ol>
+   * <li>http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html</li>
+   * <li>https://issues.apache.org/jira/browse/ZOOKEEPER-1105</li>
+   * </ol>
    * @param nodeZK - the ZK watcher to expire
-   * @param checkStatus - true to check if we can create a Table with the
-   *                    current configuration.
+   * @param checkStatus - true to check if we can create a Table with the current configuration.
    */
-  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
-      throws Exception {
+  public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception {
     Configuration c = new Configuration(this.conf);
     String quorumServers = ZKConfig.getZKQuorumServersString(c);
     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
@@ -2904,30 +2468,29 @@ public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
     long sessionID = zk.getSessionId();

     // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
-    //  so we create a first watcher to be sure that the
-    //  event was sent. We expect that if our watcher receives the event
-    //  other watchers on the same machine will get is as well.
+    // so we create a first watcher to be sure that the
+    // event was sent. We expect that if our watcher receives the event
+    // other watchers on the same machine will get is as well.
     // When we ask to close the connection, ZK does not close it before
-    //  we receive all the events, so don't have to capture the event, just
-    //  closing the connection should be enough.
+    // we receive all the events, so don't have to capture the event, just
+    // closing the connection should be enough.
-    ZooKeeper monitor = new ZooKeeper(quorumServers,
-      1000, new org.apache.zookeeper.Watcher(){
+    ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
       @Override
       public void process(WatchedEvent watchedEvent) {
-        LOG.info("Monitor ZKW received event="+watchedEvent);
+        LOG.info("Monitor ZKW received event=" + watchedEvent);
       }
-    } , sessionID, password);
+    }, sessionID, password);

     // Making it expire
-    ZooKeeper newZK = new ZooKeeper(quorumServers,
-      1000, EmptyWatcher.instance, sessionID, password);
+    ZooKeeper newZK =
+      new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password);

-    //ensure that we have connection to the server before closing down, otherwise
-    //the close session event will be eaten out before we start CONNECTING state
+    // ensure that we have connection to the server before closing down, otherwise
+    // the close session event will be eaten out before we start CONNECTING state
     long start = EnvironmentEdgeManager.currentTime();
-    while (newZK.getState() != States.CONNECTED
-      && EnvironmentEdgeManager.currentTime() - start < 1000) {
-      Thread.sleep(1);
+    while (newZK.getState() != States.CONNECTED &&
+      EnvironmentEdgeManager.currentTime() - start < 1000) {
+      Thread.sleep(1);
     }
     newZK.close();
     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
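A minimal sketch of the expiry technique the hunk above uses, with an assumed quorum string: a second client connected with an existing session's id and password is closed, which makes the server expire that session for the original client. Not part of the patch:

```java
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ExpireSessionSketch {
  // Sketch under assumptions; `victim` is an already-connected client.
  public static void expire(ZooKeeper victim, String quorum) throws Exception {
    long sessionId = victim.getSessionId();
    byte[] password = victim.getSessionPasswd();
    Watcher noop = event -> {
    };
    // Connect a doppelganger with the same session id/password, then close it.
    ZooKeeper doppelganger = new ZooKeeper(quorum, 1000, noop, sessionId, password);
    doppelganger.close(); // the victim's session is now expired server-side
  }
}
```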
@@ -2942,7 +2505,6 @@ public void process(WatchedEvent watchedEvent) {

   /**
    * Get the Mini HBase cluster.
-   *
    * @return hbase cluster
    * @see #getHBaseClusterInterface()
    */
@@ -2952,15 +2514,15 @@ public SingleProcessHBaseCluster getHBaseCluster() {

   /**
    * Returns the HBaseCluster instance.
-   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
-   * tests referring this should not assume that the cluster is a mini cluster or a
-   * distributed one. If the test only works on a mini cluster, then specific
-   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
-   * need to type-cast.
+   * <p>
+   * Returned object can be any of the subclasses of HBaseCluster, and the tests referring this
+   * should not assume that the cluster is a mini cluster or a distributed one. If the test only
+   * works on a mini cluster, then specific method {@link #getMiniHBaseCluster()} can be used
+   * instead w/o the need to type-cast.
    */
   public HBaseClusterInterface getHBaseClusterInterface() {
-    //implementation note: we should rename this method as #getHBaseCluster(),
-    //but this would require refactoring 90+ calls.
+    // implementation note: we should rename this method as #getHBaseCluster(),
+    // but this would require refactoring 90+ calls.
     return hbaseCluster;
   }
@@ -2968,10 +2530,11 @@ public HBaseClusterInterface getHBaseClusterInterface() {
    * Resets the connections so that the next time getConnection() is called, a new connection is
    * created. This is needed in cases where the entire cluster / all the masters are shutdown and
    * the connection is not valid anymore.
+   * <p>
    * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
-   * written, not all start() stop() calls go through this class. Most tests directly operate on
-   * the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
-   * maintain the connection state automatically. Cleaning this is a much bigger refactor.
+   * written, not all start() stop() calls go through this class. Most tests directly operate on the
+   * underlying mini/local hbase cluster. That makes it difficult for this wrapper class to maintain
+   * the connection state automatically. Cleaning this is a much bigger refactor.
    */
   public void invalidateConnection() throws IOException {
     closeConnection();
@@ -2979,14 +2542,13 @@ public void invalidateConnection() throws IOException {
     final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
     final String masterConfAfter = getMiniHBaseCluster().getConf().get(HConstants.MASTER_ADDRS_KEY);
     LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
-        masterConfigBefore, masterConfAfter);
+      masterConfigBefore, masterConfAfter);
     conf.set(HConstants.MASTER_ADDRS_KEY,
-        getMiniHBaseCluster().getConf().get(HConstants.MASTER_ADDRS_KEY));
+      getMiniHBaseCluster().getConf().get(HConstants.MASTER_ADDRS_KEY));
   }

   /**
-   * Get a shared Connection to the cluster.
-   * this method is thread safe.
+   * Get a shared Connection to the cluster. this method is thread safe.
    * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
    */
   public Connection getConnection() throws IOException {
@@ -2994,8 +2556,7 @@ public Connection getConnection() throws IOException {
   }

   /**
-   * Get a assigned Connection to the cluster.
-   * this method is thread safe.
+   * Get a assigned Connection to the cluster. this method is thread safe.
    * @param user assigned user
    * @return A Connection with assigned user.
    */
@@ -3004,9 +2565,9 @@ public Connection getConnection(User user) throws IOException {
   }

   /**
-   * Get a shared AsyncClusterConnection to the cluster.
-   * this method is thread safe.
-   * @return An AsyncClusterConnection that can be shared. Don't close. Will be closed on shutdown of cluster.
+   * Get a shared AsyncClusterConnection to the cluster. this method is thread safe.
+   * @return An AsyncClusterConnection that can be shared. Don't close. Will be closed on shutdown
+   *         of cluster.
    */
   public AsyncClusterConnection getAsyncConnection() throws IOException {
     try {
@@ -3015,7 +2576,7 @@ public AsyncClusterConnection getAsyncConnection() throws IOException {
       try {
         User user = UserProvider.instantiate(conf).getCurrent();
         connection = getAsyncConnection(user);
-      } catch(IOException ioe) {
+      } catch (IOException ioe) {
         throw new UncheckedIOException("Failed to create connection", ioe);
       }
     }
@@ -3027,8 +2588,7 @@ public AsyncClusterConnection getAsyncConnection() throws IOException {
   }

   /**
-   * Get a assigned AsyncClusterConnection to the cluster.
-   * this method is thread safe.
+   * Get a assigned AsyncClusterConnection to the cluster. this method is thread safe.
    * @param user assigned user
    * @return An AsyncClusterConnection with assigned user.
    */
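A hedged usage fragment for the shared-connection contract documented above; `testUtil` is an assumed `HBaseTestingUtil` instance, and only the per-test `Table` is closed:

```java
// The Connection and Admin are owned by the testing utility: do not close them.
Connection conn = testUtil.getConnection(); // shared, closed on cluster shutdown
Admin admin = testUtil.getAdmin();          // shared, closing it has no effect
try (Table table = conn.getTable(TableName.valueOf("t1"))) {
  table.put(new Put(Bytes.toBytes("r1"))
    .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
}
```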
@@ -3048,11 +2608,11 @@ public void closeConnection() throws IOException {
   }

   /**
-   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
-   * Closing it has no effect, it will be closed automatically when the cluster shutdowns
+   * Returns an Admin instance which is shared between HBaseTestingUtility instance users. Closing
+   * it has no effect, it will be closed automatically when the cluster shutdowns
    */
   public Admin getAdmin() throws IOException {
-    if (hbaseAdmin == null){
+    if (hbaseAdmin == null) {
       this.hbaseAdmin = getConnection().getAdmin();
     }
     return hbaseAdmin;
@@ -3069,8 +2629,7 @@ public Hbck getHbck() throws IOException {

   /**
    * Unassign the named region.
-   *
-   * @param regionName  The region to unassign.
+   * @param regionName The region to unassign.
    */
   public void unassignRegion(String regionName) throws IOException {
     unassignRegion(Bytes.toBytes(regionName));
@@ -3078,18 +2637,16 @@ public void unassignRegion(String regionName) throws IOException {

   /**
    * Unassign the named region.
-   *
-   * @param regionName  The region to unassign.
+   * @param regionName The region to unassign.
    */
   public void unassignRegion(byte[] regionName) throws IOException {
-    getAdmin().unassign(regionName, true);
+    getAdmin().unassign(regionName);
   }

   /**
    * Closes the region containing the given row.
-   *
-   * @param row  The row to find the containing region.
-   * @param table  The table to find the region.
+   * @param row   The row to find the containing region.
+   * @param table The table to find the region.
    */
   public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
     unassignRegionByRow(Bytes.toBytes(row), table);
@@ -3097,9 +2654,8 @@ public void unassignRegionByRow(String row, RegionLocator table) throws IOExcept

   /**
    * Closes the region containing the given row.
-   *
-   * @param row  The row to find the containing region.
-   * @param table  The table to find the region.
+   * @param row   The row to find the containing region.
+   * @param table The table to find the region.
    * @throws IOException
    */
   public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
@@ -3127,7 +2683,7 @@ public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
       }
       regCount = regions.size();
       // There are chances that before we get the region for the table from an RS the region may
-      // be going for CLOSE.  This may be because online schema change is enabled
+      // be going for CLOSE. This may be because online schema change is enabled
       if (regCount > 0) {
         idx = random.nextInt(regCount);
         // if we have just tried this region, there is no need to try again
@@ -3156,13 +2712,13 @@ public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException,
   /**
    * Set the MiniDFSCluster
    * @param cluster cluster to use
-   * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before
-   *   it is set.
+   * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before it
+   *   is set.
    * @throws IllegalStateException if the passed cluster is up when it is required to be down
    * @throws IOException if the FileSystem could not be set from the passed dfs cluster
    */
   public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
-      throws IllegalStateException, IOException {
+    throws IllegalStateException, IOException {
     if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
       throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
     }
@@ -3175,19 +2731,16 @@ public FileSystem getTestFileSystem() throws IOException {
   }

   /**
-   * Wait until all regions in a table have been assigned.  Waits default timeout before giving up
+   * Wait until all regions in a table have been assigned. Waits default timeout before giving up
    * (30 seconds).
    * @param table Table to wait on.
-   * @throws InterruptedException
-   * @throws IOException
    */
-  public void waitTableAvailable(TableName table)
-      throws InterruptedException, IOException {
+  public void waitTableAvailable(TableName table) throws InterruptedException, IOException {
     waitTableAvailable(table.getName(), 30000);
   }

   public void waitTableAvailable(TableName table, long timeoutMillis)
-      throws InterruptedException, IOException {
+    throws InterruptedException, IOException {
     waitFor(timeoutMillis, predicateTableAvailable(table));
   }

@@ -3197,7 +2750,7 @@ public void waitTableAvailable(TableName table, long timeoutMillis)
    * @param timeoutMillis Timeout.
    */
   public void waitTableAvailable(byte[] table, long timeoutMillis)
-      throws InterruptedException, IOException {
+    throws InterruptedException, IOException {
     waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
   }

@@ -3228,7 +2781,7 @@ public String explainTableAvailability(TableName tableName) throws IOException {
   }

   public String explainTableState(final TableName table, TableState.State state)
-      throws IOException {
+    throws IOException {
     TableState tableState = MetaTableAccessor.getTableState(getConnection(), table);
     if (tableState == null) {
       return "TableState in META: No table state in META for table " + table +
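These `waitTable*` helpers all funnel into the `Waiter` plumbing shown earlier in this patch. A hedged sketch of using that machinery directly, with assumed names `testUtil` and `myTable`:

```java
// Poll every 200 ms, fail the test if the predicate is still false at 30 s.
testUtil.waitFor(30000, 200, true, new Waiter.ExplainingPredicate<IOException>() {
  @Override
  public boolean evaluate() throws IOException {
    return testUtil.getAdmin().isTableAvailable(myTable);
  }

  @Override
  public String explainFailure() throws IOException {
    return myTable + " did not become available";
  }
});
```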
@@ -3256,84 +2809,72 @@ public boolean visit(Result r) throws IOException {
         return true;
       }
     };
-    MetaTableAccessor.scanMeta(getConnection(), null, null,
-      ClientMetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor);
+    MetaTableAccessor.scanMeta(getConnection(), null, null, ClientMetaTableAccessor.QueryType.TABLE,
+      Integer.MAX_VALUE, visitor);
     return lastTableState.get();
   }

   /**
-   * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
-   * regions have been all assigned.  Will timeout after default period (30 seconds)
-   * Tolerates nonexistent table.
+   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions
+   * have been all assigned. Will timeout after default period (30 seconds) Tolerates nonexistent
+   * table.
    * @param table the table to wait on.
    * @throws InterruptedException if interrupted while waiting
    * @throws IOException if an IO problem is encountered
    */
-  public void waitTableEnabled(TableName table)
-      throws InterruptedException, IOException {
+  public void waitTableEnabled(TableName table) throws InterruptedException, IOException {
     waitTableEnabled(table, 30000);
   }

   /**
-   * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
-   * regions have been all assigned.
+   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions
+   * have been all assigned.
    * @see #waitTableEnabled(TableName, long)
    * @param table Table to wait on.
    * @param timeoutMillis Time to wait on it being marked enabled.
-   * @throws InterruptedException
-   * @throws IOException
    */
   public void waitTableEnabled(byte[] table, long timeoutMillis)
-      throws InterruptedException, IOException {
+    throws InterruptedException, IOException {
     waitTableEnabled(TableName.valueOf(table), timeoutMillis);
   }

-  public void waitTableEnabled(TableName table, long timeoutMillis)
-      throws IOException {
+  public void waitTableEnabled(TableName table, long timeoutMillis) throws IOException {
     waitFor(timeoutMillis, predicateTableEnabled(table));
   }

   /**
-   * Waits for a table to be 'disabled'.  Disabled means that table is set as 'disabled'
-   * Will timeout after default period (30 seconds)
+   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' Will timeout
+   * after default period (30 seconds)
    * @param table Table to wait on.
-   * @throws InterruptedException
-   * @throws IOException
    */
-  public void waitTableDisabled(byte[] table)
-      throws InterruptedException, IOException {
+  public void waitTableDisabled(byte[] table) throws InterruptedException, IOException {
     waitTableDisabled(table, 30000);
   }

   public void waitTableDisabled(TableName table, long millisTimeout)
-      throws InterruptedException, IOException {
+    throws InterruptedException, IOException {
     waitFor(millisTimeout, predicateTableDisabled(table));
   }

   /**
-   * Waits for a table to be 'disabled'.  Disabled means that table is set as 'disabled'
+   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'
    * @param table Table to wait on.
    * @param timeoutMillis Time to wait on it being marked disabled.
-   * @throws InterruptedException
-   * @throws IOException
    */
   public void waitTableDisabled(byte[] table, long timeoutMillis)
-      throws InterruptedException, IOException {
+    throws InterruptedException, IOException {
     waitTableDisabled(TableName.valueOf(table), timeoutMillis);
   }

   /**
-   * Make sure that at least the specified number of region servers
-   * are running
+   * Make sure that at least the specified number of region servers are running
    * @param num minimum number of region servers that should be running
    * @return true if we started some servers
-   * @throws IOException
    */
-  public boolean ensureSomeRegionServersAvailable(final int num)
-      throws IOException {
+  public boolean ensureSomeRegionServersAvailable(final int num) throws IOException {
     boolean startedServer = false;
     SingleProcessHBaseCluster hbaseCluster = getMiniHBaseCluster();
-    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
+    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
       startedServer = true;
     }
     return startedServer;
   }

   public static NavigableSet<String> getAllOnlineRegions(SingleProcessHBaseCluster cluster)
-      throws IOException {
+    throws IOException {
     NavigableSet online = new TreeSet<>();
     for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
       try {
-        for (RegionInfo region :
-          ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
+        for (RegionInfo region : ProtobufUtil
+          .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
           online.add(region.getRegionNameAsString());
         }
       } catch (RegionServerStoppedException e) {
@@ -3413,8 +2943,7 @@ public static NavigableSet getAllOnlineRegions(SingleProcessHBaseCluster
     }
     for (MasterThread mt : cluster.getLiveMasterThreads()) {
       try {
-        for (RegionInfo region :
-          ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
+        for (RegionInfo region : ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
           online.add(region.getRegionNameAsString());
         }
       } catch (RegionServerStoppedException e) {
@@ -3427,26 +2956,22 @@ public static NavigableSet getAllOnlineRegions(SingleProcessHBaseCluster
   }

   /**
-   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append its hard-coded to 5 and
-   * makes tests linger.  Here is the exception you'll see:
+   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and makes tests
+   * linger. Here is the exception you'll see:
+   *
    * <pre>
    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
    * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
    * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
    * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
    * </pre>
+   *
    * @param stream A DFSClient.DFSOutputStream.
-   * @param max
-   * @throws NoSuchFieldException
-   * @throws SecurityException
-   * @throws IllegalAccessException
-   * @throws IllegalArgumentException
-   */
-  public static void setMaxRecoveryErrorCount(final OutputStream stream,
-      final int max) {
+   */
+  public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) {
     try {
-      Class [] clazzes = DFSClient.class.getDeclaredClasses();
-      for (Class clazz: clazzes) {
+      Class[] clazzes = DFSClient.class.getDeclaredClasses();
+      for (Class clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
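The reflective pattern above (find a nested class by simple name, then overwrite a private field) can be shown self-contained against a toy class; this sketch is illustrative only and uses hypothetical names:

```java
import java.lang.reflect.Field;

public class InnerClassFieldSketch {
  static class Outer {
    static class Stream {
      private int maxRecoveryErrorCount = 5;
    }
  }

  public static void main(String[] args) throws Exception {
    Object stream = new Outer.Stream();
    // Scan declared nested classes, match by simple name, then set the field,
    // mirroring setMaxRecoveryErrorCount() above.
    for (Class<?> clazz : Outer.class.getDeclaredClasses()) {
      if (clazz.getSimpleName().equals("Stream") && clazz.isInstance(stream)) {
        Field field = clazz.getDeclaredField("maxRecoveryErrorCount");
        field.setAccessible(true);
        field.setInt(stream, 30);
      }
    }
  }
}
```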
@@ -3469,7 +2994,7 @@ public static void setMaxRecoveryErrorCount(final OutputStream stream,
    * @return true if the region is assigned false otherwise.
    */
   public boolean assignRegion(final RegionInfo regionInfo)
-      throws IOException, InterruptedException {
+    throws IOException, InterruptedException {
     final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
     am.assign(regionInfo);
     return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
@@ -3477,20 +3002,17 @@ public boolean assignRegion(final RegionInfo regionInfo)

   /**
    * Move region to destination server and wait till region is completely moved and online
-   *
    * @param destRegion region to move
    * @param destServer destination server of the region
-   * @throws InterruptedException
-   * @throws IOException
    */
   public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
-      throws InterruptedException, IOException {
+    throws InterruptedException, IOException {
     HMaster master = getMiniHBaseCluster().getMaster();
     // TODO: Here we start the move. The move can take a while.
     getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
     while (true) {
-      ServerName serverName = master.getAssignmentManager().getRegionStates()
-          .getRegionServerOfRegion(destRegion);
+      ServerName serverName =
+        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion);
       if (serverName != null && serverName.equals(destServer)) {
         assertRegionOnServer(destRegion, serverName, 2000);
         break;
@@ -3500,13 +3022,10 @@ public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
   }

   /**
-   * Wait until all regions for a table in hbase:meta have a non-empty
-   * info:server, up to a configuable timeout value (default is 60 seconds)
-   * This means all regions have been deployed,
-   * master has been informed and updated hbase:meta with the regions deployed
-   * server.
+   * Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a
+   * configuable timeout value (default is 60 seconds) This means all regions have been deployed,
+   * master has been informed and updated hbase:meta with the regions deployed server.
    * @param tableName the table name
-   * @throws IOException
    */
   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
     waitUntilAllRegionsAssigned(tableName,
@@ -3515,27 +3034,24 @@ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOExce

   /**
    * Waith until all system table's regions get assigned
-   * @throws IOException
    */
   public void waitUntilAllSystemRegionsAssigned() throws IOException {
     waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
   }

   /**
-   * Wait until all regions for a table in hbase:meta have a non-empty
-   * info:server, or until timeout.  This means all regions have been deployed,
-   * master has been informed and updated hbase:meta with the regions deployed
-   * server.
+   * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until
+   * timeout. This means all regions have been deployed, master has been informed and updated
+   * hbase:meta with the regions deployed server.
    * @param tableName the table name
    * @param timeout timeout, in milliseconds
-   * @throws IOException
    */
   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
-      throws IOException {
+    throws IOException {
     if (!TableName.isMetaTableName(tableName)) {
       try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
         LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
-            timeout + "ms");
+          timeout + "ms");
         waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
           @Override
           public String explainFailure() throws IOException {
@@ -3557,17 +3073,17 @@ public boolean evaluate() throws IOException {
                 // (for fault tolerance testing).
                 tableFound = true;
                 byte[] server =
-                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
+                  r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                 if (server == null) {
                   return false;
                 } else {
                   byte[] startCode =
-                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
+                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
                   ServerName serverName =
-                      ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
-                          Bytes.toLong(startCode));
+                    ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
+                      Bytes.toLong(startCode));
                   if (!getHBaseClusterInterface().isDistributedCluster() &&
-                      getHBaseCluster().isKilledRS(serverName)) {
+                    getHBaseCluster().isKilledRS(serverName)) {
                     return false;
                   }
                 }
@@ -3578,7 +3094,8 @@ public boolean evaluate() throws IOException {
           }
         }
         if (!tableFound) {
-          LOG.warn("Didn't find the entries for table " + tableName + " in meta, already deleted?");
+          LOG.warn(
+            "Didn't find the entries for table " + tableName + " in meta, already deleted?");
         }
         return tableFound;
       }
@@ -3609,17 +3126,16 @@ public boolean evaluate() throws IOException {
   }

   /**
-   * Do a small get/scan against one store. This is required because store
-   * has no actual methods of querying itself, and relies on StoreScanner.
+   * Do a small get/scan against one store. This is required because store has no actual methods of
+   * querying itself, and relies on StoreScanner.
    */
-  public static List getFromStoreFile(HStore store,
-      Get get) throws IOException {
+  public static List getFromStoreFile(HStore store, Get get) throws IOException {
     Scan scan = new Scan(get);
     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
-        scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
-        // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
-        // readpoint 0.
-        0);
+      scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
+      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
+      // readpoint 0.
+      0);

     List result = new ArrayList<>();
     scanner.next(result);
@@ -3636,47 +3152,42 @@ public static List getFromStoreFile(HStore store,

   /**
    * Create region split keys between startkey and endKey
-   *
-   * @param startKey
-   * @param endKey
    * @param numRegions the number of regions to be created. it has to be greater than 3.
    * @return resulting split keys
    */
-  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
-    assertTrue(numRegions>3);
-    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
-    byte [][] result = new byte[tmpSplitKeys.length+1][];
+  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
+    assertTrue(numRegions > 3);
+    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
+    byte[][] result = new byte[tmpSplitKeys.length + 1][];
     System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
     result[0] = HConstants.EMPTY_BYTE_ARRAY;
     return result;
   }

   /**
-   * Do a small get/scan against one store. This is required because store
-   * has no actual methods of querying itself, and relies on StoreScanner.
+   * Do a small get/scan against one store. This is required because store has no actual methods of
+   * querying itself, and relies on StoreScanner.
    */
-  public static List getFromStoreFile(HStore store,
-      byte [] row,
-      NavigableSet columns
-      ) throws IOException {
+  public static List getFromStoreFile(HStore store, byte[] row, NavigableSet columns)
+    throws IOException {
     Get get = new Get(row);
     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
     s.put(store.getColumnFamilyDescriptor().getName(), columns);

-    return getFromStoreFile(store,get);
+    return getFromStoreFile(store, get);
   }

-  public static void assertKVListsEqual(String additionalMsg,
-      final List expected,
-      final List actual) {
+  public static void assertKVListsEqual(String additionalMsg, final List expected,
+    final List actual) {
     final int eLen = expected.size();
     final int aLen = actual.size();
     final int minLen = Math.min(eLen, aLen);

-    int i;
-    for (i = 0; i < minLen
-        && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
-        ++i) {}
+    int i = 0;
+    while (i < minLen &&
+      CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0) {
+      i++;
+    }

     if (additionalMsg == null) {
       additionalMsg = "";
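A worked illustration of the split-key arithmetic above, under the assumption that `Bytes.split(start, end, n)` returns the two endpoints plus `n` intermediate keys (so `numRegions - 3` intermediates become `numRegions - 1` keys, and prepending the empty key yields `numRegions` region start keys). Names here are assumptions:

```java
byte[] start = Bytes.toBytes("aaa");
byte[] end = Bytes.toBytes("zzz");
// Inside the helper: Bytes.split(start, end, 5 - 3) -> { "aaa", k1, k2, "zzz" }
byte[][] startKeys = testUtil.getRegionSplitStartKeys(start, end, 5);
// startKeys = { {}, "aaa", k1, k2, "zzz" }, i.e. five regions:
// [,aaa) [aaa,k1) [k1,k2) [k2,zzz) [zzz,)
```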
" + safeGetAsStr(actual, i) + + " (length " + aLen + ")" + additionalMsg); } } @@ -3702,26 +3212,20 @@ public static String safeGetAsStr(List lst, int i) { } public String getClusterKey() { - return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" - + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" - + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - } - - /** Creates a random table with the given parameters */ - public Table createRandomTable(TableName tableName, - final Collection families, - final int maxVersions, - final int numColsPerRow, - final int numFlushes, - final int numRegions, - final int numRowsPerFlush) - throws IOException, InterruptedException { - - LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + - " regions, " + numFlushes + " storefiles per region, " + - numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions + - "\n"); + return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" + + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + } + + /** + * Creates a random table with the given parameters + */ + public Table createRandomTable(TableName tableName, final Collection families, + final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions, + final int numRowsPerFlush) throws IOException, InterruptedException { + LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + + numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" + + maxVersions + "\n"); final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L); final int numCF = families.size(); @@ -3739,11 +3243,9 @@ public Table createRandomTable(TableName tableName, final int splitStartKey = actualStartKey + keysPerRegion; final int splitEndKey = actualEndKey - keysPerRegion; final String keyFormat = "%08x"; - final Table table = createTable(tableName, cfBytes, - maxVersions, - Bytes.toBytes(String.format(keyFormat, splitStartKey)), - Bytes.toBytes(String.format(keyFormat, splitEndKey)), - numRegions); + final Table table = createTable(tableName, cfBytes, maxVersions, + Bytes.toBytes(String.format(keyFormat, splitStartKey)), + Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); @@ -3753,8 +3255,8 @@ public Table createRandomTable(TableName tableName, for (int iFlush = 0; iFlush < numFlushes; ++iFlush) { for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) { - final byte[] row = Bytes.toBytes(String.format(keyFormat, - actualStartKey + rand.nextInt(actualEndKey - actualStartKey))); + final byte[] row = Bytes.toBytes( + String.format(keyFormat, actualStartKey + rand.nextInt(actualEndKey - actualStartKey))); Put put = new Put(row); Delete del = new Delete(row); @@ -3763,9 +3265,9 @@ public Table createRandomTable(TableName tableName, final long ts = rand.nextInt(); final byte[] qual = Bytes.toBytes("col" + iCol); if (rand.nextBoolean()) { - final byte[] value = Bytes.toBytes("value_for_row_" + iRow + - "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" + - ts + "_random_" + rand.nextLong()); + final byte[] value = + Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + + iCol + "_ts_" + ts + "_random_" + rand.nextLong()); put.addColumn(cf, qual, ts, value); } else if (rand.nextDouble() < 0.8) { 
del.addColumn(cf, qual, ts); @@ -3796,12 +3298,12 @@ public Table createRandomTable(TableName tableName, public static int randomFreePort() { return HBaseCommonTestingUtil.randomFreePort(); } + public static String randomMultiCastAddress() { return "226.1.1." + random.nextInt(254); } - public static void waitForHostPort(String host, int port) - throws IOException { + public static void waitForHostPort(String host, int port) throws IOException { final int maxTimeMs = 10000; final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; IOException savedException = null; @@ -3827,20 +3329,20 @@ public static void waitForHostPort(String host, int port) } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableName tableName, byte[] columnFamily, Algorithm compression, - DataBlockEncoding dataBlockEncoding) throws IOException { - return createPreSplitLoadTestTable(conf, tableName, - columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, - Durability.USE_DEFAULT); + public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, + byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding) + throws IOException { + return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression, + dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT); } + /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, @@ -3858,8 +3360,8 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, @@ -3880,46 +3382,45 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException { + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, + ColumnFamilyDescriptor hcd) throws IOException { return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER); } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. 
    * @return the number of regions the table was split into
    */
   public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
@@ -3880,46 +3382,45 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl
   }

   /**
-   * Creates a pre-split table for load testing. If the table already exists,
-   * logs a warning and continues.
+   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
+   * continues.
    * @return the number of regions the table was split into
    */
-  public static int createPreSplitLoadTestTable(Configuration conf,
-      TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException {
+  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
+    ColumnFamilyDescriptor hcd) throws IOException {
     return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
   }

   /**
-   * Creates a pre-split table for load testing. If the table already exists,
-   * logs a warning and continues.
+   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
+   * continues.
    * @return the number of regions the table was split into
    */
-  public static int createPreSplitLoadTestTable(Configuration conf,
-      TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException {
-    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd},
-        numRegionsPerServer);
+  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
+    ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException {
+    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] { hcd },
+      numRegionsPerServer);
   }

   /**
-   * Creates a pre-split table for load testing. If the table already exists,
-   * logs a warning and continues.
+   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
+   * continues.
    * @return the number of regions the table was split into
    */
-  public static int createPreSplitLoadTestTable(Configuration conf,
-      TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
-      int numRegionsPerServer) throws IOException {
-    return createPreSplitLoadTestTable(conf, desc, hcds,
-        new RegionSplitter.HexStringSplit(), numRegionsPerServer);
+  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
+    ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException {
+    return createPreSplitLoadTestTable(conf, desc, hcds, new RegionSplitter.HexStringSplit(),
+      numRegionsPerServer);
   }

   /**
-   * Creates a pre-split table for load testing. If the table already exists,
-   * logs a warning and continues.
+   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
+   * continues.
    * @return the number of regions the table was split into
    */
-  public static int createPreSplitLoadTestTable(Configuration conf,
-      TableDescriptor td, ColumnFamilyDescriptor[] cds,
-      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
+  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td,
+    ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer)
+    throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
     for (ColumnFamilyDescriptor cd : cds) {
       if (!td.hasColumnFamily(cd.getName())) {
@@ -3934,27 +3435,25 @@ public static int createPreSplitLoadTestTable(Configuration conf,
     try {
       // create a table a pre-splits regions.
       // The number of splits is set as:
-      //  region servers * regions per region server).
+      // region servers * regions per region server).
       int numberOfServers = admin.getRegionServers().size();
       if (numberOfServers == 0) {
         throw new IllegalStateException("No live regionservers");
       }

       totalNumberOfRegions = numberOfServers * numRegionsPerServer;
-      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
-          "pre-splitting table into " + totalNumberOfRegions + " regions " +
-          "(regions per server: " + numRegionsPerServer + ")");
+      LOG.info(
+        "Number of live regionservers: " + numberOfServers + ", " + "pre-splitting table into " +
+          totalNumberOfRegions + " regions " + "(regions per server: " + numRegionsPerServer + ")");

-      byte[][] splits = splitter.split(
-          totalNumberOfRegions);
+      byte[][] splits = splitter.split(totalNumberOfRegions);

       admin.createTable(td, splits);
     } catch (MasterNotRunningException e) {
       LOG.error("Master not running", e);
       throw new IOException(e);
     } catch (TableExistsException e) {
-      LOG.warn("Table " + td.getTableName() +
-          " already exists, continuing");
+      LOG.warn("Table " + td.getTableName() + " already exists, continuing");
     } finally {
       admin.close();
       unmanagedConnection.close();
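A hedged call-site sketch for the pre-split helpers above: the table is split into (live region servers) x (regions per server) regions using the hexadecimal split algorithm wired in by the overload chain. `conf` and `LOG` are assumed to exist in the caller:

```java
TableDescriptor td =
  TableDescriptorBuilder.newBuilder(TableName.valueOf("loadtest")).build();
ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("f");
// e.g. 3 live region servers x 5 regions per server -> 15 regions
int regions = HBaseTestingUtil.createPreSplitLoadTestTable(conf, td, cf, 5);
LOG.info("table pre-split into " + regions + " regions");
```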
@@ -3969,14 +3468,11 @@ public static int getMetaRSPort(Connection connection) throws IOException {
   }

   /**
-   * Due to async racing issue, a region may not be in
-   * the online region list of a region server yet, after
-   * the assignment znode is deleted and the new assignment
-   * is recorded in master.
+   * Due to async racing issue, a region may not be in the online region list of a region server
+   * yet, after the assignment znode is deleted and the new assignment is recorded in master.
    */
-  public void assertRegionOnServer(
-      final RegionInfo hri, final ServerName server,
-      final long timeout) throws IOException, InterruptedException {
+  public void assertRegionOnServer(final RegionInfo hri, final ServerName server,
+    final long timeout) throws IOException, InterruptedException {
     long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
     while (true) {
       List regions = getAdmin().getRegions(server);
@@ -3985,30 +3481,27 @@ public void assertRegionOnServer(
       if (now > timeoutTime) break;
       Thread.sleep(10);
     }
-    fail("Could not find region " + hri.getRegionNameAsString()
-      + " on server " + server);
+    fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server);
   }

   /**
-   * Check to make sure the region is open on the specified
-   * region server, but not on any other one.
+   * Check to make sure the region is open on the specified region server, but not on any other one.
    */
-  public void assertRegionOnlyOnServer(
-      final RegionInfo hri, final ServerName server,
-      final long timeout) throws IOException, InterruptedException {
+  public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server,
+    final long timeout) throws IOException, InterruptedException {
     long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
     while (true) {
       List regions = getAdmin().getRegions(server);
       if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
         List rsThreads = getHBaseCluster().getLiveRegionServerThreads();
-        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
+        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
           HRegionServer rs = rsThread.getRegionServer();
           if (server.equals(rs.getServerName())) {
             continue;
           }
           Collection hrs = rs.getOnlineRegionsLocalContext();
-          for (HRegion r: hrs) {
+          for (HRegion r : hrs) {
             assertTrue("Region should not be double assigned",
               r.getRegionInfo().getRegionId() != hri.getRegionId());
           }
@@ -4019,21 +3512,20 @@ public void assertRegionOnlyOnServer(
       if (now > timeoutTime) break;
       Thread.sleep(10);
     }
-    fail("Could not find region " + hri.getRegionNameAsString()
-      + " on server " + server);
+    fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server);
   }

   public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
     TableDescriptor td =
-        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
     RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
     return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
   }

   public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
-      BlockCache blockCache) throws IOException {
+    BlockCache blockCache) throws IOException {
     TableDescriptor td =
-        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
     RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
     return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
   }
@@ -4049,8 +3541,8 @@ public ExplainingPredicate predicateNoRegionsInTransition() {
     return new ExplainingPredicate() {
       @Override
       public String explainFailure() throws IOException {
-        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
-            .getAssignmentManager().getRegionStates();
+        final RegionStates regionStates =
+          getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
         return "found in transition: " + regionStates.getRegionsInTransition().toString();
       }
@@ -4116,10 +3608,10 @@ public boolean evaluate() throws IOException {
       try (Table table = getConnection().getTable(tableName)) {
         TableDescriptor htd = table.getDescriptor();
         for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
-            .getAllRegionLocations()) {
+          .getAllRegionLocations()) {
           Scan scan = new Scan().withStartRow(loc.getRegion().getStartKey())
-              .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit()
-              .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
+            .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit()
+            .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
           for (byte[] family : htd.getColumnFamilyNames()) {
             scan.addFamily(family);
           }
@@ -4137,7 +3629,6 @@ public boolean evaluate() throws IOException {
   /**
    * Wait until no regions in transition.
    * @param timeout How long to wait.
-   * @throws IOException
    */
   public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
     waitFor(timeout, predicateNoRegionsInTransition());
@@ -4153,8 +3644,6 @@ public void waitUntilNoRegionsInTransition() throws IOException {

   /**
    * Wait until labels is ready in VisibilityLabelsCache.
-   * @param timeoutMillis
-   * @param labels
    */
   public void waitLabelAvailable(long timeoutMillis, final String... labels) {
     final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
@@ -4183,8 +3672,8 @@ public String explainFailure() {
   }

   /**
-   * Create a set of column descriptors with the combination of compression,
-   * encoding, bloom codecs available.
+   * Create a set of column descriptors with the combination of compression, encoding, bloom codecs
+   * available.
    * @return the list of column descriptors
    */
   public static List generateColumnDescriptors() {
@@ -4192,17 +3681,17 @@ public static List generateColumnDescriptors() {
   }

   /**
-   * Create a set of column descriptors with the combination of compression,
-   * encoding, bloom codecs available.
+   * Create a set of column descriptors with the combination of compression, encoding, bloom codecs
+   * available.
    * @param prefix family names prefix
    * @return the list of column descriptors
    */
   public static List generateColumnDescriptors(final String prefix) {
     List columnFamilyDescriptors = new ArrayList<>();
     long familyId = 0;
-    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
-      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
-        for (BloomType bloomType: BloomType.values()) {
+    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
+      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
+        for (BloomType bloomType : BloomType.values()) {
           String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
           ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
             ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
@@ -4261,10 +3750,9 @@ private boolean isTargetTable(final byte[] inRow, Cell c) {
   }

   /**
-   * Sets up {@link MiniKdc} for testing security.
-   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
-   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
-   * FYI, there is also the easier-to-use kerby KDC server and utility for using it,
+   * Sets up {@link MiniKdc} for testing security. Uses {@link HBaseKerberosUtils} to set the given
+   * keytab file as {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. FYI, there is also the easier-to-use
+   * kerby KDC server and utility for using it,
    * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
    * less baggage. It came in in HBASE-5291.
    */
@@ -4284,7 +3772,7 @@ public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
       kdc = new MiniKdc(conf, dir);
       kdc.start();
     } catch (BindException e) {
-      FileUtils.deleteDirectory(dir);  // clean directory
+      FileUtils.deleteDirectory(dir); // clean directory
       numTries++;
       if (numTries == 3) {
         LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
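A hedged sketch of driving `setupMiniKdc` from a secure test; the retry-on-`BindException` loop above exists because the randomly chosen KDC port can collide. `testUtil` and the principal names are assumptions:

```java
// Materialize a keytab under the data test dir, then stand up the mini KDC.
File keytab = new File(testUtil.getDataTestDir("keytab").toUri().getPath());
MiniKdc kdc = testUtil.setupMiniKdc(keytab);
kdc.createPrincipal(keytab, "hbase/localhost", "HTTP/localhost");
// ... run Kerberos-dependent assertions, then tear down:
kdc.stop();
```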
Tried " + numTries + " times."); @@ -4301,14 +3789,13 @@ public MiniKdc setupMiniKdc(File keytabFile) throws Exception { public int getNumHFiles(final TableName tableName, final byte[] family) { int numHFiles = 0; for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) { - numHFiles+= getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, - family); + numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family); } return numHFiles; } public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName, - final byte[] family) { + final byte[] family) { int numHFiles = 0; for (Region region : rs.getRegions(tableName)) { numHFiles += region.getStore(family).getStorefilesCount(); @@ -4321,10 +3808,9 @@ public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescr Collection ltdFamilies = Arrays.asList(ltd.getColumnFamilies()); Collection rtdFamilies = Arrays.asList(rtd.getColumnFamilies()); assertEquals(ltdFamilies.size(), rtdFamilies.size()); - for (Iterator it = ltdFamilies.iterator(), it2 = - rtdFamilies.iterator(); it.hasNext();) { - assertEquals(0, - ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next())); + for (Iterator it = ltdFamilies.iterator(), + it2 = rtdFamilies.iterator(); it.hasNext();) { + assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java index 099b82b5e95c..44a8caeea336 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java @@ -97,7 +97,7 @@ public static Builder newBuilder() { } /** - * @return the underlying instance of {@link HBaseTestingUtil} + * Returns the underlying instance of {@link HBaseTestingUtil} */ public HBaseTestingUtil getTestingUtility() { return testingUtility; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java index dc899e038fac..a405f7b24a93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java @@ -47,10 +47,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; /** - * This class creates a single process HBase cluster. - * each server. The master uses the 'default' FileSystem. The RegionServers, - * if we are running on DistributedFilesystem, create a FileSystem instance - * each and will close down their instance on the way out. + * This class creates a single process HBase cluster. each server. The master uses the 'default' + * FileSystem. The RegionServers, if we are running on DistributedFilesystem, create a FileSystem + * instance each and will close down their instance on the way out. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX) @InterfaceStability.Evolving @@ -101,16 +100,16 @@ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegi * each cluster start. 
*/ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numAlwaysStandByMasters, - int numRegionServers, List rsPorts, Class masterClass, - Class regionserverClass) - throws IOException, InterruptedException { + int numRegionServers, List rsPorts, Class masterClass, + Class regionserverClass) + throws IOException, InterruptedException { super(conf); // Hadoop 2 CompatibilityFactory.getInstance(MetricsAssertHelper.class).init(); init(numMasters, numAlwaysStandByMasters, numRegionServers, rsPorts, masterClass, - regionserverClass); + regionserverClass); this.initialClusterStatus = getClusterMetrics(); } @@ -119,30 +118,29 @@ public Configuration getConfiguration() { } /** - * Subclass so can get at protected methods (none at moment). Also, creates - * a FileSystem instance per instantiation. Adds a shutdown own FileSystem - * on the way out. Shuts down own Filesystem only, not All filesystems as - * the FileSystem system exit hook does. + * Subclass so can get at protected methods (none at moment). Also, creates a FileSystem instance + * per instantiation. Adds a shutdown own FileSystem on the way out. Shuts down own Filesystem + * only, not All filesystems as the FileSystem system exit hook does. */ public static class MiniHBaseClusterRegionServer extends HRegionServer { private Thread shutdownThread = null; private User user = null; /** - * List of RegionServers killed so far. ServerName also comprises startCode of a server, - * so any restarted instances of the same server will have different ServerName and will not - * coincide with past dead ones. So there's no need to cleanup this list. + * List of RegionServers killed so far. ServerName also comprises startCode of a server, so any + * restarted instances of the same server will have different ServerName and will not coincide + * with past dead ones. So there's no need to cleanup this list. */ static Set killedServers = new HashSet<>(); public MiniHBaseClusterRegionServer(Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { super(conf); this.user = User.getCurrent(); } @Override - protected void handleReportForDutyResponse( - final RegionServerStartupResponse c) throws IOException { + protected void handleReportForDutyResponse(final RegionServerStartupResponse c) + throws IOException { super.handleReportForDutyResponse(c); // Run this thread to shutdown our filesystem on way out. this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem()); @@ -196,15 +194,17 @@ private void abortRegionServer(String reason, Throwable cause) { } /** - * Alternate shutdown hook. - * Just shuts down the passed fs, not all as default filesystem hook does. + * Alternate shutdown hook. Just shuts down the passed fs, not all as default filesystem hook + * does. 
*/ static class SingleFileSystemShutdownThread extends Thread { private final FileSystem fs; + SingleFileSystemShutdownThread(final FileSystem fs) { super("Shutdown of " + fs); this.fs = fs; } + @Override public void run() { try { @@ -232,7 +232,7 @@ private void init(final int nMasterNodes, final int numAlwaysStandByMasters, // start up a LocalHBaseCluster hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, numAlwaysStandByMasters, 0, - masterClass, regionserverClass); + masterClass, regionserverClass); // manually add the regionservers as other users for (int i = 0; i < nRegionNodes; i++) { @@ -240,8 +240,7 @@ private void init(final int nMasterNodes, final int numAlwaysStandByMasters, if (rsPorts != null) { rsConf.setInt(HConstants.REGIONSERVER_PORT, rsPorts.get(i)); } - User user = HBaseTestingUtil.getDifferentUser(rsConf, - ".hfs."+index++); + User user = HBaseTestingUtil.getDifferentUser(rsConf, ".hfs." + index++); hbaseCluster.addRegionServer(rsConf, i, user); } @@ -296,7 +295,7 @@ public void resumeRegionServer(ServerName serverName) throws IOException { @Override public void waitForRegionServerToStop(ServerName serverName, long timeout) throws IOException { - //ignore timeout for now + // ignore timeout for now waitOnRegionServer(getRegionServerIndex(serverName)); } @@ -392,7 +391,7 @@ public void stopMaster(ServerName serverName) throws IOException { @Override public void waitForMasterToStop(ServerName serverName, long timeout) throws IOEx - //ignore timeout for now + // ignore timeout for now waitOnMaster(getMasterIndex(serverName)); } @@ -400,20 +399,18 @@ public void waitForMasterToStop(ServerName serverName, long timeout) throws IOEx * Starts a region server thread running * @return New RegionServerThread */ - public JVMClusterUtil.RegionServerThread startRegionServer() - throws IOException { + public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException { final Configuration newConf = HBaseConfiguration.create(conf); return startRegionServer(newConf); } private JVMClusterUtil.RegionServerThread startRegionServer(Configuration configuration) - throws IOException { - User rsUser = - HBaseTestingUtil.getDifferentUser(configuration, ".hfs."+index++); - JVMClusterUtil.RegionServerThread t = null; + throws IOException { + User rsUser = HBaseTestingUtil.getDifferentUser(configuration, ".hfs." + index++); + JVMClusterUtil.RegionServerThread t = null; try { - t = hbaseCluster.addRegionServer( - configuration, hbaseCluster.getRegionServers().size(), rsUser); + t = + hbaseCluster.addRegionServer(configuration, hbaseCluster.getRegionServers().size(), rsUser); t.start(); t.waitForServerOnline(); } catch (InterruptedException ie) { @@ -423,16 +420,15 @@ private JVMClusterUtil.RegionServerThread startRegionServer(Configuration config } /** - * Starts a region server thread and waits until its processed by master. Throws an exception - * when it can't start a region server or when the region server is not processed by master - * within the timeout. - * + * Starts a region server thread and waits until it is processed by master. Throws an exception when + * it can't start a region server or when the region server is not processed by master within the + * timeout.
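The contract documented above (start a server thread, then block the caller until the server is registered or a timeout expires) can be sketched with a CountDownLatch that the server body counts down once it is online. Everything below is an illustrative stand-in, not the patch's API:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Launch a worker thread and wait, bounded by a timeout, for it to signal
// that it is online; give up and interrupt it otherwise.
public final class StartAndWait {
  private StartAndWait() {
  }

  public static Thread startAndAwaitOnline(Runnable serverBody, CountDownLatch onlineLatch,
      long timeoutMillis) throws InterruptedException {
    Thread server = new Thread(serverBody, "server");
    server.start();
    if (!onlineLatch.await(timeoutMillis, TimeUnit.MILLISECONDS)) {
      server.interrupt(); // mirrors the patch's behavior of stopping the stalled server
      throw new IllegalStateException("Server did not come online in " + timeoutMillis + " ms");
    }
    return server;
  }
}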
* @return New RegionServerThread */ public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout) - throws IOException { + throws IOException { - JVMClusterUtil.RegionServerThread t = startRegionServer(); + JVMClusterUtil.RegionServerThread t = startRegionServer(); ServerName rsServerName = t.getRegionServer().getServerName(); long start = EnvironmentEdgeManager.currentTime(); @@ -452,7 +448,7 @@ public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout) /** * Cause a region server to exit doing basic clean up only on its way out. - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. */ public String abortRegionServer(int serverNumber) { HRegionServer server = getRegionServer(serverNumber); @@ -463,8 +459,7 @@ public String abortRegionServer(int serverNumber) { /** * Shut down the specified region server cleanly - * - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. * @return the region server that was stopped */ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber) { @@ -480,9 +475,8 @@ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber) { * @return the region server that was stopped */ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber, - final boolean shutdownFS) { - JVMClusterUtil.RegionServerThread server = - hbaseCluster.getRegionServers().get(serverNumber); + final boolean shutdownFS) { + JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Stopping " + server.toString()); server.getRegionServer().stop("Stopping rs " + serverNumber); return server; @@ -493,8 +487,7 @@ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber, * @param serverNumber Used as index into a list. */ public JVMClusterUtil.RegionServerThread suspendRegionServer(int serverNumber) { - JVMClusterUtil.RegionServerThread server = - hbaseCluster.getRegionServers().get(serverNumber); + JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Suspending {}", server.toString()); server.suspend(); return server; @@ -505,8 +498,7 @@ public JVMClusterUtil.RegionServerThread suspendRegionServer(int serverNumber) { * @param serverNumber Used as index into a list. */ public JVMClusterUtil.RegionServerThread resumeRegionServer(int serverNumber) { - JVMClusterUtil.RegionServerThread server = - hbaseCluster.getRegionServers().get(serverNumber); + JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Resuming {}", server.toString()); server.resume(); return server; @@ -520,16 +512,13 @@ public String waitOnRegionServer(final int serverNumber) { return this.hbaseCluster.waitOnRegionServer(serverNumber); } - /** * Starts a master thread running - * * @return New MasterThread */ public JVMClusterUtil.MasterThread startMaster() throws IOException { Configuration c = HBaseConfiguration.create(conf); - User user = - HBaseTestingUtil.getDifferentUser(c, ".hfs."
+ index++); JVMClusterUtil.MasterThread t = null; try { @@ -539,7 +528,7 @@ public JVMClusterUtil.MasterThread startMaster() throws IOException { throw new IOException("Interrupted adding master to cluster", ie); } conf.set(HConstants.MASTER_ADDRS_KEY, - hbaseCluster.getConfiguration().get(HConstants.MASTER_ADDRS_KEY)); + hbaseCluster.getConfiguration().get(HConstants.MASTER_ADDRS_KEY)); return t; } @@ -556,7 +545,7 @@ public HMaster getMaster() { * @return the active MasterThread, null if none is active. */ public MasterThread getMasterThread() { - for (MasterThread mt: hbaseCluster.getLiveMasters()) { + for (MasterThread mt : hbaseCluster.getLiveMasters()) { if (mt.getMaster().isActiveMaster()) { return mt; } @@ -574,7 +563,7 @@ public HMaster getMaster(final int serverNumber) { /** * Cause a master to exit without shutting down entire cluster. - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. */ public String abortMaster(int serverNumber) { HMaster server = getMaster(serverNumber); @@ -585,8 +574,7 @@ public String abortMaster(int serverNumber) { /** * Shut down the specified master cleanly - * - * @param serverNumber Used as index into a list. + * @param serverNumber Used as index into a list. * @return the master that was stopped */ public JVMClusterUtil.MasterThread stopMaster(int serverNumber) { @@ -601,10 +589,8 @@ public JVMClusterUtil.MasterThread stopMaster(int serverNumber) { * test and you shut down one before end of the test. * @return the master that was stopped */ - public JVMClusterUtil.MasterThread stopMaster(int serverNumber, - final boolean shutdownFS) { - JVMClusterUtil.MasterThread server = - hbaseCluster.getMasters().get(serverNumber); + public JVMClusterUtil.MasterThread stopMaster(int serverNumber, final boolean shutdownFS) { + JVMClusterUtil.MasterThread server = hbaseCluster.getMasters().get(serverNumber); LOG.info("Stopping " + server.toString()); server.getMaster().stop("Stopping master " + serverNumber); return server; @@ -619,24 +605,18 @@ public String waitOnMaster(final int serverNumber) { } /** - * Blocks until there is an active master and that master has completed - * initialization. - * - * @return true if an active master becomes available. false if there are no - * masters left. + * Blocks until there is an active master and that master has completed initialization. + * @return true if an active master becomes available. false if there are no masters left. */ @Override public boolean waitForActiveAndReadyMaster(long timeout) throws IOException { - List mts; long start = EnvironmentEdgeManager.currentTime(); - while (!(mts = getMasterThreads()).isEmpty() - && (EnvironmentEdgeManager.currentTime() - start) < timeout) { - for (JVMClusterUtil.MasterThread mt : mts) { + while (EnvironmentEdgeManager.currentTime() - start < timeout) { + for (JVMClusterUtil.MasterThread mt : getMasterThreads()) { if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) { return true; } } - Threads.sleep(100); } return false; @@ -722,9 +702,8 @@ public void flushcache(TableName tableName) throws IOException { * Call compact on all regions on all participating regionservers.
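The simplified waitForActiveAndReadyMaster loop above is a bounded poll: re-check a condition until it holds or the deadline passes. In generic form (the helper name and the 100 ms interval echo the patch but are illustrative here):

import java.util.function.BooleanSupplier;

// Poll a condition until it becomes true or the timeout elapses; returns
// whether the condition was ever observed to be true.
public final class Polling {
  private Polling() {
  }

  public static boolean waitFor(BooleanSupplier condition, long timeoutMillis)
      throws InterruptedException {
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < timeoutMillis) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(100);
    }
    return false;
  }
}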
*/ public void compact(boolean major) throws IOException { - for (JVMClusterUtil.RegionServerThread t: - this.hbaseCluster.getRegionServers()) { - for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { + for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) { + for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) { r.compact(major); } } @@ -734,10 +713,9 @@ public void compact(boolean major) throws IOException { * Call compact on all regions of the specified table. */ public void compact(TableName tableName, boolean major) throws IOException { - for (JVMClusterUtil.RegionServerThread t: - this.hbaseCluster.getRegionServers()) { - for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { - if(r.getTableDescriptor().getTableName().equals(tableName)) { + for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) { + for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) { + if (r.getTableDescriptor().getTableName().equals(tableName)) { r.compact(major); } } @@ -760,7 +738,7 @@ public List getRegionServerThreads() { } /** - * @return List of live region server threads (skips the aborted and the killed) + * Returns List of live region server threads (skips the aborted and the killed) */ public List getLiveRegionServerThreads() { return this.hbaseCluster.getLiveRegionServers(); @@ -775,10 +753,8 @@ public HRegionServer getRegionServer(int serverNumber) { } public HRegionServer getRegionServer(ServerName serverName) { - return hbaseCluster.getRegionServers().stream() - .map(t -> t.getRegionServer()) - .filter(r -> r.getServerName().equals(serverName)) - .findFirst().orElse(null); + return hbaseCluster.getRegionServers().stream().map(t -> t.getRegionServer()) + .filter(r -> r.getServerName().equals(serverName)).findFirst().orElse(null); } public List getRegions(byte[] tableName) { @@ -791,7 +767,7 @@ public List getRegions(TableName tableName) { HRegionServer hrs = rst.getRegionServer(); for (Region region : hrs.getOnlineRegionsLocalContext()) { if (region.getTableDescriptor().getTableName().equals(tableName)) { - ret.add((HRegion)region); + ret.add((HRegion) region); } } } @@ -809,12 +785,12 @@ public int getServerWithMeta() { /** * Get the location of the specified region * @param regionName Name of the region in bytes - * @return Index into List of {@link SingleProcessHBaseCluster#getRegionServerThreads()} - * of HRS carrying hbase:meta. Returns -1 if none found. + * @return Index into List of {@link SingleProcessHBaseCluster#getRegionServerThreads()} of HRS + * carrying hbase:meta. Returns -1 if none found. */ public int getServerWith(byte[] regionName) { int index = 0; - for (JVMClusterUtil.RegionServerThread rst: getRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); if (!hrs.isStopped()) { Region region = hrs.getOnlineRegion(regionName); @@ -864,8 +840,8 @@ public long countServedRegions() { } /** - * Do a simulated kill all masters and regionservers. Useful when it is - * impossible to bring the mini-cluster back for clean shutdown. + * Do a simulated kill of all masters and regionservers. Useful when it is impossible to bring the + * mini-cluster back for clean shutdown. */ public void killAll() { // Do backups first.
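The condensed stream chain in getRegionServer(ServerName) above (map each thread to its server, filter on the name, take the first hit or null) generalizes to a one-liner. The helper below is an illustrative sketch, not patch code:

import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;

// Unwrap each element, keep the matches, and return the first one or null.
public final class FindFirst {
  private FindFirst() {
  }

  public static <W, V> V findOrNull(List<W> wrappers, Function<W, V> unwrap,
      Predicate<V> matches) {
    return wrappers.stream().map(unwrap).filter(matches).findFirst().orElse(null);
  }
}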
@@ -897,18 +873,17 @@ public List findRegionsForTable(TableName tableName) { HRegionServer hrs = rst.getRegionServer(); for (Region region : hrs.getRegions(tableName)) { if (region.getTableDescriptor().getTableName().equals(tableName)) { - ret.add((HRegion)region); + ret.add((HRegion) region); } } } return ret; } - protected int getRegionServerIndex(ServerName serverName) { - //we have a small number of region servers, this should be fine for now. + // we have a small number of region servers, this should be fine for now. List servers = getRegionServerThreads(); - for (int i=0; i < servers.size(); i++) { + for (int i = 0; i < servers.size(); i++) { if (servers.get(i).getRegionServer().getServerName().equals(serverName)) { return i; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java index 60e61f6451aa..afd74bb9314f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java @@ -45,8 +45,8 @@ @InterfaceStability.Evolving public final class StartTestingClusterOption { /** - * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you - * can find the active/primary master with {@link SingleProcessHBaseCluster#getMaster()}. + * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you can + * find the active/primary master with {@link SingleProcessHBaseCluster#getMaster()}. */ private final int numMasters; @@ -63,9 +63,8 @@ public final class StartTestingClusterOption { private final Class masterClass; /** - * Number of region servers to start up. - * If this value is > 1, then make sure config "hbase.regionserver.info.port" is -1 - * (i.e. no ui per regionserver) otherwise bind errors. + * Number of region servers to start up. If this value is > 1, then make sure config + * "hbase.regionserver.info.port" is -1 (i.e. no ui per regionserver); otherwise you will get bind errors. */ private final int numRegionServers; /** @@ -96,13 +95,13 @@ public final class StartTestingClusterOption { private final int numZkServers; /** - * Whether to create a new root or data directory path. If true, the newly created data directory - * will be configured as HBase rootdir. This will overwrite existing root directory config. + * Whether to create a new root or data directory path. If true, the newly created data directory + * will be configured as HBase rootdir. This will overwrite existing root directory config. */ private final boolean createRootDir; /** - * Whether to create a new WAL directory. If true, the newly created directory will be configured + * Whether to create a new WAL directory. If true, the newly created directory will be configured * as HBase wal.dir which is separate from HBase rootdir.
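StartTestingClusterOption, whose field documentation continues just below, is an immutable options object produced by a fluent Builder. A minimal sketch of that shape with stand-in fields (the real class carries numMasters, numRegionServers, createRootDir, createWALDir, and more):

// Immutable options plus a fluent builder; the defaults live in the builder
// and, as the javadoc below notes, are part of the public contract.
public final class ClusterOptions {
  private final int masters;
  private final int regionServers;

  private ClusterOptions(int masters, int regionServers) {
    this.masters = masters;
    this.regionServers = regionServers;
  }

  public int getMasters() {
    return masters;
  }

  public int getRegionServers() {
    return regionServers;
  }

  public static Builder builder() {
    return new Builder();
  }

  public static final class Builder {
    private int masters = 1;
    private int regionServers = 1;

    public Builder masters(int masters) {
      this.masters = masters;
      return this;
    }

    public Builder regionServers(int regionServers) {
      this.regionServers = regionServers;
      return this;
    }

    public ClusterOptions build() {
      return new ClusterOptions(masters, regionServers);
    }
  }
}

Usage would read ClusterOptions.builder().masters(2).regionServers(3).build(), mirroring StartTestingClusterOption.builder() in the patch.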
*/ private final boolean createWALDir; @@ -174,15 +173,15 @@ public boolean isCreateWALDir() { @Override public String toString() { - return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass - + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) - + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes - + ", dataNodeHosts=" + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers - + ", createRootDir=" + createRootDir + ", createWALDir=" + createWALDir + '}'; + return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass + + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) + + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts=" + + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir=" + + createRootDir + ", createWALDir=" + createWALDir + '}'; } /** - * @return a new builder. + * Returns a new builder. */ public static Builder builder() { return new Builder(); @@ -190,7 +189,7 @@ public static Builder builder() { /** * Builder pattern for creating an {@link StartTestingClusterOption}. - * + *

* The default values of its fields should be considered public and constant. Changing the default * values may cause other tests to fail. */ @@ -214,9 +213,9 @@ public StartTestingClusterOption build() { if (dataNodeHosts != null && dataNodeHosts.length != 0) { numDataNodes = dataNodeHosts.length; } - return new StartTestingClusterOption(numMasters,numAlwaysStandByMasters, masterClass, - numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers, - createRootDir, createWALDir); + return new StartTestingClusterOption(numMasters, numAlwaysStandByMasters, masterClass, + numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers, + createRootDir, createWALDir); } public Builder numMasters(int numMasters) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index db7c89ca3844..bedc104b6c26 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -108,9 +109,10 @@ public class BaseTestHBaseFsck { * Debugging method to dump the contents of meta. */ protected void dumpMeta(TableName tableName) throws IOException { - List metaRows = TEST_UTIL.getMetaTableRows(tableName); - for (byte[] row : metaRows) { - LOG.info(Bytes.toString(row)); + List regions = + MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tableName); + for (RegionInfo region : regions) { + LOG.info(region.getRegionNameAsString()); } } @@ -210,7 +212,6 @@ protected void deleteRegion(Configuration conf, final TableDescriptor htd, LOG.info(hri.toString() + hsa.toString()); } - TEST_UTIL.getMetaTableRows(htd.getTableName()); LOG.info("*** After delete:"); dumpMeta(htd.getTableName()); } diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java index acc5f144beca..e1223cef00ea 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java @@ -54,8 +54,8 @@ public HBaseZKTestingUtil(Configuration conf) { } /** - * @return Where the cluster will write data on the local subsystem. Creates it if it does not - * exist already. A subdir of {@code HBaseCommonTestingUtility#getBaseTestDir()} + * Returns where the cluster will write data on the local subsystem. Creates it if it does not + * exist already. A subdir of {@code HBaseCommonTestingUtility#getBaseTestDir()} */ Path getClusterTestDir() { if (clusterTestDir == null) { @@ -99,7 +99,7 @@ public MiniZooKeeperCluster startMiniZKCluster() throws Exception { * @return zk cluster started. */ public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum, int... clientPortList) - throws Exception { + throws Exception { setupClusterTestDir(); return startMiniZKCluster(clusterTestDir, zooKeeperServerNum, clientPortList); } @@ -109,7 +109,7 @@ public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum, int...
cl * port mentioned is used as the default port for ZooKeeper. */ private MiniZooKeeperCluster startMiniZKCluster(File dir, int zooKeeperServerNum, - int[] clientPortList) throws Exception { + int[] clientPortList) throws Exception { if (this.zkCluster != null) { throw new IOException("Cluster already running at " + dir); } @@ -159,7 +159,7 @@ public void shutdownMiniZKCluster() throws IOException { * users. Don't close it, it will be closed automatically when the cluster shuts down * @return The ZKWatcher instance. */ - public synchronized ZKWatcher getZooKeeperWatcher() throws IOException { + public ZKWatcher getZooKeeperWatcher() throws IOException { if (zooKeeperWatcher == null) { zooKeeperWatcher = new ZKWatcher(conf, "testing utility", new Abortable() { @Override @@ -177,27 +177,7 @@ public boolean isAborted() { } /** - * Gets a ZKWatcher. - */ - public static ZKWatcher getZooKeeperWatcher(HBaseZKTestingUtil testUtil) throws IOException { - return new ZKWatcher(testUtil.getConfiguration(), "unittest", new Abortable() { - boolean aborted = false; - - @Override - public void abort(String why, Throwable e) { - aborted = true; - throw new RuntimeException("Fatal ZK error, why=" + why, e); - } - - @Override - public boolean isAborted() { - return aborted; - } - }); - } - - /** - * @return True if we removed the test dirs + * Returns true if we removed the test dirs */ @Override public boolean cleanupTestDir() {
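getZooKeeperWatcher above creates its watcher on first use and hands back the cached instance afterwards (the patch drops the synchronized modifier from the HBase method). A generic sketch of that lazy, cached accessor, with synchronized retained here for thread safety; the type names are illustrative:

import java.io.IOException;

// Create an expensive resource on the first request and reuse it afterwards.
public final class LazyResource<T> {
  public interface Factory<T> {
    T create() throws IOException;
  }

  private final Factory<T> factory;
  private T instance;

  public LazyResource(Factory<T> factory) {
    this.factory = factory;
  }

  public synchronized T get() throws IOException {
    if (instance == null) {
      instance = factory.create();
    }
    return instance;
  }
}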