
Commit 9164ed4

LYCJeff authored and Apache9 committed
HBASE-27530 Fix comment syntax errors (#4910)
Signed-off-by: Duo Zhang <[email protected]> (cherry picked from commit f9518cc)
1 parent 7a80b01 commit 9164ed4

3 files changed, 19 insertions(+), 19 deletions(-)

hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java

Lines changed: 5 additions & 5 deletions
@@ -77,7 +77,7 @@
  * An asynchronous HDFS output stream implementation which fans out data to datanode and only
  * supports writing file with only one block.
  * <p>
- * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The mainly
+ * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The main
  * usage of this class is implementing WAL, so we only expose a little HDFS configurations in the
  * method. And we place it here under io package because we want to make it independent of WAL
  * implementation thus easier to move it to HDFS project finally.
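The javadoc above describes the intended use: obtain the stream from FanOutOneBlockAsyncDFSOutputHelper.createOutput and drive it through the AsyncFSOutput interface. A minimal write-and-sync sketch follows; it assumes AsyncFSOutput exposes write(byte[]) and flush(boolean) returning CompletableFuture<Long>, and it deliberately elides how the output is created because the createOutput parameter list varies across HBase versions.

  import java.util.concurrent.CompletableFuture;
  import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;

  final class AsyncFSOutputExample {
    // Append one record and wait until every datanode in the fan-out has acked it.
    static long appendAndSync(AsyncFSOutput out, byte[] record) throws Exception {
      out.write(record);                               // buffered locally, not yet sent
      CompletableFuture<Long> acked = out.flush(true); // fan out to all datanodes, sync
      return acked.get();                              // length acked by every datanode
    }
  }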
@@ -97,8 +97,8 @@
 @InterfaceAudience.Private
 public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
 
-  // The MAX_PACKET_SIZE is 16MB but it include the header size and checksum size. So here we set a
-  // smaller limit for data size.
+  // The MAX_PACKET_SIZE is 16MB, but it includes the header size and checksum size. So here we set
+  // a smaller limit for data size.
   private static final int MAX_DATA_LEN = 12 * 1024 * 1024;
 
   private final Configuration conf;
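For context on the 12 MB limit above, a rough budget shows why that much data stays well under the 16 MB packet ceiling once checksums and the header are added. The chunk and checksum sizes below are assumptions (the common 512-byte chunk with a 4-byte CRC), not values taken from the HBase source.

  // Illustrative packet-size budget (assumed 512-byte chunks, 4-byte checksums).
  int maxPacketSize = 16 * 1024 * 1024;                   // HDFS cap: header + checksums + data
  int maxDataLen = 12 * 1024 * 1024;                      // the MAX_DATA_LEN above
  int bytesPerChecksum = 512;
  int checksumLen = (maxDataLen / bytesPerChecksum) * 4;  // roughly 96 KB of checksums for 12 MB of data
  assert maxDataLen + checksumLen < maxPacketSize;        // plenty of headroom left for the header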
@@ -161,7 +161,7 @@ public Callback(CompletableFuture<Long> future, long ackedLength,
   private long nextPacketOffsetInBlock = 0L;
 
   // the length of the trailing partial chunk, this is because the packet start offset must be
-  // aligned with the length of checksum chunk so we need to resend the same data.
+  // aligned with the length of checksum chunk, so we need to resend the same data.
   private int trailingPartialChunkLength = 0;
 
   private long nextPacketSeqno = 0L;
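The comment above can be made concrete with a small, purely illustrative calculation (assuming 512-byte checksum chunks): the next packet must start on a chunk boundary, so any partial chunk that was already flushed is sent again together with the new data.

  // Illustrative only: how a trailing partial chunk forces a resend (assumed 512-byte chunks).
  int chunkSize = 512;
  long flushedLength = 1234L;                                          // bytes flushed so far
  int trailingPartialChunkLength = (int) (flushedLength % chunkSize);  // 210 bytes past the boundary
  long nextPacketOffsetInBlock = flushedLength - trailingPartialChunkLength; // 1024, chunk-aligned
  // Those 210 trailing bytes are included again in the next packet so its offset stays aligned.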
@@ -416,7 +416,7 @@ private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
       checksumBuf.release();
       headerBuf.release();
 
-      // This method takes ownership of the dataBuf so we need release it before returning.
+      // This method takes ownership of the dataBuf, so we need release it before returning.
       dataBuf.release();
       return;
     }
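The dataBuf comment reflects a general Netty reference-counting rule: a method that takes ownership of a ByteBuf must make sure it is released on every exit path, including early returns. A small standalone sketch of that convention, not the actual flushBuffer code:

  import io.netty.buffer.ByteBuf;

  final class OwnershipExample {
    // Takes ownership of dataBuf. On the error path we must release it ourselves before
    // returning; on the success path the component we hand it to releases it later.
    static void consume(ByteBuf dataBuf, boolean broken) {
      if (broken) {
        dataBuf.release(); // nobody else will ever see the buffer, so drop our reference here
        return;
      }
      handOff(dataBuf);    // the receiver now owns the buffer
    }

    static void handOff(ByteBuf buf) {
      buf.release();       // stand-in for, e.g., writing the buffer to a Netty channel
    }
  }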

hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java

Lines changed: 11 additions & 11 deletions
@@ -72,14 +72,14 @@ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf,
  * file's primary node. If all is well, it should return near immediately. But, as is common, it
  * is the very primary node that has crashed and so the namenode will be stuck waiting on a socket
  * timeout before it will ask another datanode to start the recovery. It does not help if we call
- * recoverLease in the meantime and in particular, subsequent to the socket timeout, a
- * recoverLease invocation will cause us to start over from square one (possibly waiting on socket
- * timeout against primary node). So, in the below, we do the following: 1. Call recoverLease. 2.
- * If it returns true, break. 3. If it returns false, wait a few seconds and then call it again.
- * 4. If it returns true, break. 5. If it returns false, wait for what we think the datanode
- * socket timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it
- * returns false, repeat starting at step 5. above. If HDFS-4525 is available, call it every
- * second and we might be able to exit early.
+ * recoverLease in the meantime and in particular, after the socket timeout, a recoverLease
+ * invocation will cause us to start over from square one (possibly waiting on socket timeout
+ * against primary node). So, in the below, we do the following: 1. Call recoverLease. 2. If it
+ * returns true, break. 3. If it returns false, wait a few seconds and then call it again. 4. If
+ * it returns true, break. 5. If it returns false, wait for what we think the datanode socket
+ * timeout is (configurable) and then try again. 6. If it returns true, break. 7. If it returns
+ * false, repeat starting at step 5. above. If HDFS-4525 is available, call it every second, and
+ * we might be able to exit early.
  */
 private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p,
   final Configuration conf, final CancelableProgressable reporter) throws IOException {
@@ -89,10 +89,10 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina
   // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
   // beyond that limit 'to be safe'.
   long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
-  // This setting should be a little bit above what the cluster dfs heartbeat is set to.
+  // This setting should be a little above what the cluster dfs heartbeat is set to.
   long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
   // This should be set to how long it'll take for us to timeout against primary datanode if it
-  // is dead. We set it to 64 seconds, 4 second than the default READ_TIMEOUT in HDFS, the
+  // is dead. We set it to 64 seconds, 4 seconds than the default READ_TIMEOUT in HDFS, the
   // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY. If recovery is still failing after this
   // timeout, then further recovery will take liner backoff with this base, to avoid endless
   // preemptions when this value is not properly configured.
@@ -118,7 +118,7 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina
       Thread.sleep(firstPause);
     } else {
       // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
-      // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
+      // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though).
       long localStartWaiting = EnvironmentEdgeManager.currentTime();
       while (
         (EnvironmentEdgeManager.currentTime() - localStartWaiting)
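The retry scheme spelled out in the javadoc above (call recoverLease, take one short pause, then pause roughly a datanode socket timeout per attempt with linear backoff, polling isFileClosed where HDFS-4525 is available) can be sketched roughly as follows. This is a simplified reconstruction from the comment, not the actual recoverDFSFileLease body; the overall recovery timeout and the reporter callbacks of the real method are omitted.

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  final class LeaseRecoverySketch {
    // Simplified version of the loop described in the javadoc: call recoverLease, pause briefly
    // once, then pause roughly a datanode socket timeout per attempt (linear backoff), polling
    // isFileClosed (HDFS-4525) every second so we can exit early once the file is closed.
    static boolean recover(DistributedFileSystem dfs, Path p, long firstPause, long subsequentPause)
        throws Exception {
      for (int nbAttempt = 0; ; nbAttempt++) {
        if (dfs.recoverLease(p)) {
          return true;                     // steps 2/4/6: recovery succeeded, break out
        }
        if (nbAttempt == 0) {
          Thread.sleep(firstPause);        // step 3: short pause before the second try
        } else {
          long waitUntil = System.currentTimeMillis() + subsequentPause * nbAttempt;
          while (System.currentTimeMillis() < waitUntil) {
            if (dfs.isFileClosed(p)) {     // HDFS-4525: poll so we can exit early
              return true;
            }
            Thread.sleep(1000L);           // "call it every second"
          }
        }
      }
    }
  }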

hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java

Lines changed: 3 additions & 3 deletions
@@ -66,7 +66,7 @@
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 /**
- * The base class for load balancers. It provides the the functions used to by
+ * The base class for load balancers. It provides the functions used to by
  * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign regions in the edge
  * cases. It doesn't provide an implementation of the actual balancing algorithm.
  */
@@ -1456,7 +1456,7 @@ public Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, Server
     return assignments;
   }
 
-  // Group all of the old assignments by their hostname.
+  // Group all the old assignments by their hostname.
   // We can't group directly by ServerName since the servers all have
   // new start-codes.
 
@@ -1614,7 +1614,7 @@ private ServerName randomAssignment(Cluster cluster, RegionInfo regionInfo,
   }
 
   /**
-   * Round robin a list of regions to a list of servers
+   * Round-robin a list of regions to a list of servers
    */
   private void roundRobinAssignment(Cluster cluster, List<RegionInfo> regions,
     List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
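For the round-robin assignment mentioned in the last hunk, a plain illustration of the idea, independent of the actual BaseLoadBalancer code and its Cluster bookkeeping, is simply dealing regions out to servers in turn:

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  final class RoundRobinSketch {
    // Deal regions to servers one at a time, wrapping around the server list, so every
    // server ends up with either floor(n/m) or ceil(n/m) of the n regions on m servers.
    static <R, S> Map<S, List<R>> roundRobin(List<R> regions, List<S> servers) {
      Map<S, List<R>> assignments = new HashMap<>();
      for (int i = 0; i < regions.size(); i++) {
        S server = servers.get(i % servers.size());
        assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(regions.get(i));
      }
      return assignments;
    }
  }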
