Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion hbase-hbck2/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@
</goals>
</execution>
</executions>
</plugin>
</plugin><plugin><groupId>org.apache.maven.plugins</groupId><artifactId>maven-compiler-plugin</artifactId><configuration><source>8</source><target>8</target></configuration></plugin>
</plugins>
</build>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -191,46 +191,60 @@ Map<TableName, List<T>> reportTablesRegions(final List<String> namespacesOrTable
}

List<Future<List<String>>> processRegionsMetaCleanup(
ExecFunction<Map<TableName, List<T>>, List<String>> reportFunction,
ExecFunction<List<String>, List<T>> execFunction, List<String> nameSpaceOrTable)
throws IOException {
ExecutorService executorService = Executors.newFixedThreadPool((nameSpaceOrTable == null
|| nameSpaceOrTable.size() > Runtime.getRuntime().availableProcessors())
? Runtime.getRuntime().availableProcessors()
: nameSpaceOrTable.size());
ExecFunction<Map<TableName, List<T>>, List<String>> reportFunction,
ExecFunction<List<String>, List<T>> execFunction, List<String> nameSpaceOrTable)
throws IOException {

// Determine the number of available processors
int availableProcessors = Runtime.getRuntime().availableProcessors();

// Decide on the thread pool size based on the provided list size
int threadPoolSize;
if (nameSpaceOrTable == null || nameSpaceOrTable.size() > availableProcessors) {
threadPoolSize = availableProcessors;
} else {
threadPoolSize = nameSpaceOrTable.size();
}

// Create the executor service using the determined thread pool size
ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);
List<Future<List<String>>> futures =
new ArrayList<>(nameSpaceOrTable == null ? 1 : nameSpaceOrTable.size());
new ArrayList<>(nameSpaceOrTable == null ? 1 : nameSpaceOrTable.size());

try {
try (final Admin admin = conn.getAdmin()) {
Map<TableName, List<T>> report = reportFunction.execute(nameSpaceOrTable);
if (report.size() < 1) {
if (report.isEmpty()) {
LOG.info("\nNo mismatches found in meta. Worth using related reporting function "
+ "first.\nYou are likely passing non-existent "
+ "namespace or table. Note that table names should include the namespace "
+ "portion even for tables in the default namespace. "
+ "See also the command usage.\n");
+ "first.\nYou are likely passing non-existent "
+ "namespace or table. Note that table names should include the namespace "
+ "portion even for tables in the default namespace. "
+ "See also the command usage.\n");
}
for (TableName tableName : report.keySet()) {
if (admin.tableExists(tableName)) {
futures.add(executorService.submit(new Callable<List<String>>() {
@Override
public List<String> call() throws Exception {
LOG.debug("running thread for {}", tableName.getNameWithNamespaceInclAsString());
LOG.debug("running thread for {}",
tableName.getNameWithNamespaceInclAsString());
return execFunction.execute(report.get(tableName));
}
}));
} else {
LOG.warn("Table {} does not exist! Skipping...",
tableName.getNameWithNamespaceInclAsString());
tableName.getNameWithNamespaceInclAsString());
}
}
boolean allDone;
do {
allDone = true;
for (Future<List<String>> f : futures) {
allDone &= f.isDone();

// Wait for each future to complete.
for (Future<List<String>> future : futures) {
try {
future.get();
} catch (Exception e) {
LOG.error("Exception while waiting for future completion", e);
}
} while (!allDone);
}
}
} finally {
executorService.shutdown();
Expand Down
836 changes: 260 additions & 576 deletions hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java

Large diffs are not rendered by default.

371 changes: 371 additions & 0 deletions hbase-hbck2/src/main/java/org/apache/hbase/HBCK2CommandUsage.java

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
package org.apache.hbase;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Common base for HBase maintenance command-line tools. Centralizes the
 * Configuration/FileSystem setup, connection creation, and the
 * ToolRunner-based launch/exit-code handling shared by subclasses.
 *
 * <p>Not thread-safe; instances are intended for single-threaded CLI use.
 */
public abstract class BaseHBaseMaintenanceTool extends Configured implements Tool {
  // NOTE: a static logger is shared by all subclasses, so log events are
  // attributed to this base class rather than the concrete tool.
  protected static final Logger LOG = LoggerFactory.getLogger(BaseHBaseMaintenanceTool.class);
  protected final Configuration conf;
  protected final FileSystem fs;

  /**
   * Builds the tool around an HBase-resolved copy of {@code conf}.
   *
   * @param conf base Hadoop configuration; HBase defaults are layered on top
   * @throws IOException if the backing FileSystem cannot be obtained
   */
  protected BaseHBaseMaintenanceTool(Configuration conf) throws IOException {
    // Resolve HBase defaults FIRST and hand the result to Configured, so
    // getConf() and this.conf refer to the same object. (Previously the raw
    // conf went to super() while this.conf held a different, HBase-merged
    // instance — callers of getConf() saw an un-merged configuration.)
    super(HBaseConfiguration.create(conf));
    this.conf = getConf();
    this.fs = FileSystem.get(this.conf);
  }

  /**
   * Opens a new cluster Connection using this tool's configuration.
   * Caller owns the connection and must close it.
   */
  protected Connection createConnection() throws IOException {
    return ConnectionFactory.createConnection(conf);
  }

  /**
   * Runs {@code tool} via ToolRunner, mapping any thrown exception to a
   * non-zero exit code after logging it.
   *
   * @return the tool's exit code, or 1 on unexpected failure
   */
  public static int launchTool(String[] args, BaseHBaseMaintenanceTool tool) {
    try {
      return ToolRunner.run(tool, args);
    } catch (Exception e) {
      LOG.error("Tool failed:", e);
      return 1;
    }
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
package org.apache.hbase;

import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;

/**
* Default strategy for merging regions.
* Merges if neither region is split and if the combined region size is acceptable.
*/
/**
 * Default {@link RegionMergeStrategy}: a pair of regions qualifies for a merge
 * only when neither region is a split parent AND the owning
 * {@link RegionsMerger} accepts the pair (e.g. on combined-size grounds).
 */
public class DefaultMergeStrategy implements RegionMergeStrategy {
  private final RegionsMerger owner;
  private final Path tablePath;
  private final Set<RegionInfo> inFlightMerges;

  public DefaultMergeStrategy(RegionsMerger merger, Path tableDir, Set<RegionInfo> mergingRegions) {
    this.owner = merger;
    this.tablePath = tableDir;
    this.inFlightMerges = mergingRegions;
  }

  @Override
  public boolean canMerge(RegionInfo region1, RegionInfo region2) throws Exception {
    // Split parents must never be merged again.
    if (region1.isSplit() || region2.isSplit()) {
      return false;
    }
    // Delegate the final size/state decision to the merger itself.
    return owner.canMerge(tablePath, region1, region2, inFlightMerges);
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -36,19 +36,14 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MissingRegionDirsRepairTool extends Configured implements org.apache.hadoop.util.Tool {

private static final Logger LOG =
LoggerFactory.getLogger(MissingRegionDirsRepairTool.class.getName());

public class MissingRegionDirsRepairTool extends BaseHBaseMaintenanceTool {
private static final String WORKING_DIR = ".missing_dirs_repair";

private Configuration conf;
private HBCK2 hbck;
private LoadIncrementalHFiles bulkLoad;

/**
 * Builds the repair tool. The base class resolves HBase defaults into the
 * configuration and opens the backing FileSystem.
 *
 * @param conf base Hadoop configuration
 * @throws IOException if base-class FileSystem initialization fails
 */
public MissingRegionDirsRepairTool(Configuration conf) throws IOException {
  super(conf); // BaseHBaseMaintenanceTool sets up this.conf and this.fs.
  this.hbck = new HBCK2(conf);
  this.bulkLoad = new LoadIncrementalHFiles(conf);
}
Expand Down Expand Up @@ -114,10 +109,6 @@ public int run(String[] strings) throws Exception {
}

public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
int errCode = ToolRunner.run(new MissingRegionDirsRepairTool(conf), args);
if (errCode != 0) {
System.exit(errCode);
}
System.exit(launchTool(args, new MissingRegionDirsRepairTool(HBaseConfiguration.create())));
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
package org.apache.hbase;

import org.apache.hadoop.hbase.client.RegionInfo;

/**
* Strategy interface for deciding if two regions can be merged.
*/
/**
 * Strategy interface for deciding whether two regions may be merged.
 *
 * <p>Marked {@code @FunctionalInterface} so the compiler enforces the
 * single-abstract-method contract and implementations may be lambdas.
 */
@FunctionalInterface
public interface RegionMergeStrategy {
  /**
   * Determines whether the two given regions can be merged.
   *
   * @param region1 the first candidate region
   * @param region2 the second candidate region
   * @return {@code true} if the pair is mergeable; {@code false} otherwise
   * @throws Exception if any error occurs while evaluating the pair
   */
  boolean canMerge(RegionInfo region1, RegionInfo region2) throws Exception;
}
Loading