PerformanceEvaluation.java
@@ -249,6 +249,22 @@ public String toString() {
     public int compareTo(RunResult o) {
@NihalJain (Contributor, Author) commented on Oct 22, 2024:
Issue list for quick ref, let's see how many get fixed with the 1st commit:

SpotBugs module:hbase-server
  Random object created and used only once in org.apache.hadoop.hbase.util.LoadTestKVGenerator.getValueForRowColumn(int, byte[][]) At LoadTestKVGenerator.java:[line 111]
SpotBugs module:root
  Integral division result cast to double or float in org.apache.hadoop.hbase.PerformanceEvaluation.calculateRowsAndSize(PerformanceEvaluation$TestOptions) At PerformanceEvaluation.java:[line 3154]
  org.apache.hadoop.hbase.PerformanceEvaluation$RunResult defines compareTo(PerformanceEvaluation$RunResult) and uses Object.equals() At PerformanceEvaluation.java:[line 250]
  Random object created and used only once in org.apache.hadoop.hbase.util.LoadTestKVGenerator.getValueForRowColumn(int, byte[][]) At LoadTestKVGenerator.java:[line 111]
  org.apache.hadoop.hbase.util.LoadTestTool.DEFAULT_NUM_REGIONS_PER_SERVER isn't final but should be At LoadTestTool.java:[line 167]
  org.apache.hadoop.hbase.util.MultiThreadedAction.verifyResultAgainstDataGenerator(Result, boolean, boolean) concatenates strings using + in a loop At MultiThreadedAction.java:[line 415]
  Integral division result cast to double or float in org.apache.hadoop.hbase.util.MultiThreadedAction$ProgressReporter.run() At MultiThreadedAction.java:[line 206]
  org.apache.hadoop.hbase.util.MultiThreadedReader$HBaseReaderThread.createGet(long) concatenates strings using + in a loop At MultiThreadedReader.java:[line 318]
  Dead store to rowKey in org.apache.hadoop.hbase.util.MultiThreadedReaderWithACL$HBaseReaderThreadWithACL.queryKey(Get, boolean, long) At MultiThreadedReaderWithACL.java:[line 91]
  Inconsistent synchronization of org.apache.hadoop.hbase.util.MultiThreadedUpdater.writer; locked 75% of time. Unsynchronized access at MultiThreadedUpdater.java:[line 80]
  Unwritten field: MultiThreadedUpdaterWithACL.java:[line 94]
  Exception is caught when Exception is not thrown in org.apache.hadoop.hbase.wal.WALPerformanceEvaluation.run(String[]) At WALPerformanceEvaluation.java:[line 244]
  Format string should use %n rather than \n in org.apache.hadoop.hbase.wal.WALPerformanceEvaluation.printUsageAndExit() At WALPerformanceEvaluation.java:[line 414]
  Exception is caught when Exception is not thrown in org.apache.hadoop.hbase.wal.WALPerformanceEvaluation$WALPutBenchmark.run() At WALPerformanceEvaluation.java:[line 171]

       return Long.compare(this.duration, o.duration);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) {
+        return true;
+      }
+      if (obj == null || getClass() != obj.getClass()) {
+        return false;
+      }
+      return this.compareTo((RunResult) obj) == 0;
+    }
+
+    @Override
+    public int hashCode() {
+      return Long.hashCode(duration);
+    }
   }
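For context on the Eq warning fixed above: Comparable recommends that `(x.compareTo(y) == 0) == x.equals(y)`, because sorted collections deduplicate via compareTo while hash collections use equals/hashCode. A minimal standalone sketch (a hypothetical `Duration` class, not the PR code) of why the two must agree:

```java
import java.util.HashSet;
import java.util.TreeSet;

// Sketch: equals()/hashCode() made consistent with compareTo().
class Duration implements Comparable<Duration> {
  final long millis;

  Duration(long millis) {
    this.millis = millis;
  }

  @Override
  public int compareTo(Duration o) {
    return Long.compare(millis, o.millis);
  }

  // Without these two overrides, a TreeSet (compareTo-based) and a
  // HashSet (equals/hashCode-based) disagree on what a duplicate is.
  @Override
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (obj == null || getClass() != obj.getClass()) return false;
    return compareTo((Duration) obj) == 0;
  }

  @Override
  public int hashCode() {
    return Long.hashCode(millis);
  }

  public static void main(String[] args) {
    TreeSet<Duration> sorted = new TreeSet<>();
    HashSet<Duration> hashed = new HashSet<>();
    sorted.add(new Duration(5));
    sorted.add(new Duration(5));
    hashed.add(new Duration(5));
    hashed.add(new Duration(5));
    // Both now report size 1; before the overrides, hashed would hold 2.
    System.out.println(sorted.size() + " " + hashed.size());
  }
}
```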

/**
@@ -3144,14 +3160,15 @@ static TestOptions calculateRowsAndSize(final TestOptions opts) {
       && (opts.getCmdName().equals(RANDOM_READ) || opts.getCmdName().equals(RANDOM_SEEK_SCAN)))
       && opts.size != DEFAULT_OPTS.size && opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows
     ) {
-      opts.totalRows = (int) opts.size * rowsPerGB;
+      opts.totalRows = (int) (opts.size * rowsPerGB);
     } else if (opts.size != DEFAULT_OPTS.size) {
       // total size in GB specified
-      opts.totalRows = (int) opts.size * rowsPerGB;
+      opts.totalRows = (int) (opts.size * rowsPerGB);
       opts.perClientRunRows = opts.totalRows / opts.numClientThreads;
     } else {
       opts.totalRows = opts.perClientRunRows * opts.numClientThreads;
-      opts.size = opts.totalRows / rowsPerGB;
+      // Cast to float to ensure floating-point division
+      opts.size = (float) opts.totalRows / rowsPerGB;
     }
     return opts;
   }
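A note on the two cast fixes above: the unary cast binds tighter than `*`, so `(int) opts.size * rowsPerGB` truncated `size` to an int before multiplying, silently dropping any fractional GB. A tiny standalone sketch (made-up values; `rowsPerGb` here is a hypothetical constant, not the real per-GB figure):

```java
// Cast precedence: (int) a * b casts a first, then multiplies.
public class CastPrecedence {
  public static void main(String[] args) {
    float sizeGb = 1.5f;       // e.g. a user-supplied size of 1.5 GB
    int rowsPerGb = 1_048_576; // hypothetical rows-per-GB figure

    int before = (int) sizeGb * rowsPerGb;  // (int) 1.5f == 1 -> 1048576
    int after = (int) (sizeGb * rowsPerGb); // 1.5f * 1048576 -> 1572864

    System.out.println(before + " vs " + after);
  }
}
```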
LoadTestTool.java
@@ -164,7 +164,7 @@ public class LoadTestTool extends AbstractHBaseTool {
   public static final String OPT_NUM_REGIONS_PER_SERVER = "num_regions_per_server";
   protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE =
     "Desired number of regions per region server. Defaults to 5.";
-  public static int DEFAULT_NUM_REGIONS_PER_SERVER = 5;
+  public static final int DEFAULT_NUM_REGIONS_PER_SERVER = 5;

   public static final String OPT_REGION_REPLICATION = "region_replication";
   protected static final String OPT_REGION_REPLICATION_USAGE =
MultiThreadedAction.java
@@ -203,16 +203,16 @@ public void run() {
       long numKeysDelta = numKeys - priorNumKeys;
       long totalOpTimeDelta = totalOpTime - priorCumulativeOpTime;

-      double averageKeysPerSecond = (time > 0) ? (numKeys * 1000 / time) : 0;
+      double averageKeysPerSecond = (time > 0) ? (numKeys * 1000.0 / time) : 0;

       LOG.info(threadsLeft + "Keys=" + numKeys + ", cols="
         + StringUtils.humanReadableInt(numCols.get()) + ", time=" + formatTime(time)
         + ((numKeys > 0 && time > 0)
-          ? (" Overall: [" + "keys/s= " + numKeys * 1000 / time + ", latency="
+          ? (" Overall: [" + "keys/s= " + (numKeys * 1000.0 / time) + ", latency="
             + String.format("%.2f", (double) totalOpTime / (double) numKeys) + " ms]")
           : "")
         + ((numKeysDelta > 0)
-          ? (" Current: [" + "keys/s=" + numKeysDelta * 1000 / REPORTING_INTERVAL_MS
+          ? (" Current: [" + "keys/s=" + (numKeysDelta * 1000.0 / REPORTING_INTERVAL_MS)
             + ", latency="
             + String.format("%.2f", (double) totalOpTimeDelta / (double) numKeysDelta) + " ms]")
           : "")
@@ -407,15 +407,15 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal
           verifyCfAndColumnIntegrity
             && !dataGenerator.verify(result.getRow(), cf, columnValues.keySet())
         ) {
-          String colsStr = "";
+          StringBuilder colsStr = new StringBuilder();
           for (byte[] col : columnValues.keySet()) {
             if (colsStr.length() > 0) {
-              colsStr += ", ";
+              colsStr.append(", ");
             }
-            colsStr += "[" + Bytes.toString(col) + "]";
+            colsStr.append("[").append(Bytes.toString(col)).append("]");
           }
-          LOG.error("Error checking data for key [" + rowKeyStr + "], bad columns for family ["
-            + cfStr + "]: " + colsStr);
+          LOG.error("Error checking data for key [{}], bad columns for family [{}]: {}", rowKeyStr,
+            cfStr, colsStr.toString());
           printLocations(result);
           return false;
         }
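Two idioms in this hunk: `StringBuilder` avoids the O(n²) copying that `+=` in a loop causes, and the SLF4J `{}` placeholders defer message assembly until the level is known to be enabled. A small self-contained sketch of both (hypothetical logger and data, not the PR code):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogDemo.class);

  public static void main(String[] args) {
    StringBuilder cols = new StringBuilder();
    for (String col : new String[] { "q1", "q2", "q3" }) {
      if (cols.length() > 0) {
        cols.append(", "); // one growing buffer, no per-iteration String copies
      }
      cols.append('[').append(col).append(']');
    }
    // Placeholders are substituted only if ERROR is enabled, so the
    // message is never built on the hot path when logging is off.
    LOG.error("bad columns for family [{}]: {}", "cf1", cols);
  }
}
```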
MultiThreadedReader.java
@@ -307,15 +307,15 @@ private Get[] readKey(long[] keysToRead) {

     protected Get createGet(long keyToRead) throws IOException {
       Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
-      String cfsString = "";
+      StringBuilder cfsString = new StringBuilder();
       byte[][] columnFamilies = dataGenerator.getColumnFamilies();
       for (byte[] cf : columnFamilies) {
         get.addFamily(cf);
         if (verbose) {
           if (cfsString.length() > 0) {
-            cfsString += ", ";
+            cfsString.append(", ");
           }
-          cfsString += "[" + Bytes.toStringBinary(cf) + "]";
+          cfsString.append("[").append(Bytes.toStringBinary(cf)).append("]");
         }
       }
       get = dataGenerator.beforeGet(keyToRead, get);
@@ -324,7 +324,7 @@ protected Get createGet(long keyToRead) throws IOException {
         get.setConsistency(Consistency.TIMELINE);
       }
       if (verbose) {
-        LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
+        LOG.info("[{}] Querying key {}, cfs {}", readerId, keyToRead, cfsString.toString());
       }
       return get;
     }
MultiThreadedReaderWithACL.java
@@ -88,8 +88,6 @@ protected void closeTable() {
     @Override
     public void queryKey(final Get get, final boolean verify, final long keyToRead)
       throws IOException {
-      final String rowKey = Bytes.toString(get.getRow());
-
       // read the data
       final long start = System.nanoTime();
       PrivilegedExceptionAction<Object> action = new PrivilegedExceptionAction<Object>() {
MultiThreadedUpdater.java
@@ -76,7 +76,7 @@ public void setBatchUpdate(boolean isBatchUpdate) {
     this.isBatchUpdate = isBatchUpdate;
   }

-  public void linkToWriter(MultiThreadedWriterBase writer) {
+  public synchronized void linkToWriter(MultiThreadedWriterBase writer) {
     this.writer = writer;
     writer.setTrackWroteKeys(true);
   }
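Context for the IS2_INCONSISTENT_SYNC warning: other accesses to `writer` in this class are synchronized, so the one unsynchronized write in `linkToWriter` left the field guarded only 75% of the time; every access to shared mutable state must use the same lock. A stripped-down sketch of the pattern (a toy class, not the actual updater):

```java
// Sketch: consistent synchronization on a shared field.
class LinkedWorker {
  private Object writer; // shared mutable state

  // Before the fix the write was unsynchronized:
  //   public void link(Object w) { this.writer = w; }
  // which races with the synchronized reader below.

  public synchronized void link(Object w) { // write now takes the same lock
    this.writer = w;
  }

  public synchronized boolean isLinked() { // reads already took the lock
    return writer != null;
  }
}
```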
MultiThreadedUpdaterWithACL.java
@@ -75,8 +75,6 @@ protected void addUpdaterThreads(int numThreads) throws IOException {
   }

   public class HBaseUpdaterThreadWithACL extends HBaseUpdaterThread {

-    private Table table;
-
     private MutateAccessAction mutateAction = new MutateAccessAction();

     public HBaseUpdaterThreadWithACL(int updaterId) throws IOException {
WALPerformanceEvaluation.java
@@ -168,7 +168,7 @@ public void run() {
           loopSpan.end();
         }
       }
-    } catch (Exception e) {
+    } catch (IOException e) {
       LOG.error(getClass().getSimpleName() + " Thread failed", e);
     } finally {
       threadSpan.end();
@@ -241,7 +241,7 @@ public int run(String[] args) throws Exception {
System.err.println("UNEXPECTED: " + cmd);
printUsageAndExit();
}
} catch (Exception e) {
} catch (NumberFormatException e) {
printUsageAndExit();
}
}
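The REC_CATCH_EXCEPTION fixes narrow each catch to what the guarded block can actually throw; here only the numeric-argument parsing can fail, so catching bare `Exception` would also have swallowed unrelated runtime bugs. A minimal sketch of the narrowed pattern (hypothetical flag parsing, not the PR's full loop):

```java
// Catch only what the parsing can throw; let real bugs propagate.
public class ArgParse {
  public static void main(String[] args) {
    int threads = 1;
    try {
      if (args.length > 0) {
        threads = Integer.parseInt(args[0]); // may throw NumberFormatException
      }
    } catch (NumberFormatException e) {
      System.err.println("Invalid thread count: " + args[0]);
      System.exit(1);
    }
    System.out.println("threads=" + threads);
  }
}
```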
@@ -411,7 +411,7 @@ private static void logBenchmarkResult(String testName, long numTests, long tota
   }

   private void printUsageAndExit() {
-    System.err.printf("Usage: hbase %s [options]\n", getClass().getName());
+    System.err.printf("Usage: hbase %s [options]%n", getClass().getName());
     System.err.println(" where [options] are:");
     System.err.println(" -h|-help Show this help and exit.");
     System.err.println(" -threads <N> Number of threads writing on the WAL.");
LoadTestKVGenerator.java
@@ -98,6 +98,7 @@ public byte[] generateRandomSizeValue(byte[]... seedStrings) {
    * Generates random bytes of the given size for the given row and column qualifier. The random
    * seed is fully determined by these parameters.
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DMI_RANDOM_USED_ONLY_ONCE")
   private static byte[] getValueForRowColumn(int dataSize, byte[]... seedStrings) {
     long seed = dataSize;
     for (byte[] str : seedStrings) {
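This last warning is suppressed rather than "fixed" because the method deliberately creates a throwaway `Random` from a fully deterministic seed, so the same row/column always regenerates the same bytes; sharing one `Random` would break that reproducibility. A simplified sketch of the pattern (toy seed mixing, not the actual HBase derivation):

```java
import java.util.Arrays;
import java.util.Random;

// Deterministic value generation: a single-use Random seeded from the
// inputs, so identical inputs always produce identical bytes.
public class DeterministicValue {
  static byte[] valueFor(int dataSize, byte[]... seedStrings) {
    long seed = dataSize;
    for (byte[] s : seedStrings) {
      for (byte b : s) {
        seed = seed * 31 + b; // toy mixing, for illustration only
      }
    }
    byte[] value = new byte[dataSize];
    new Random(seed).nextBytes(value); // created and used once, on purpose
    return value;
  }

  public static void main(String[] args) {
    byte[] a = valueFor(8, "row1".getBytes(), "cf:q1".getBytes());
    byte[] b = valueFor(8, "row1".getBytes(), "cf:q1".getBytes());
    System.out.println(Arrays.equals(a, b)); // true: reproducible
  }
}
```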