From 7e7fe4654a237ab2a6b083f0ce6dbbb8fccf0d39 Mon Sep 17 00:00:00 2001
From: haxiaolin
Date: Thu, 10 Feb 2022 17:29:02 +0800
Subject: [PATCH 1/2] HBASE-26552 Introduce retry to logroller to avoid abort

---
 .../hadoop/hbase/wal/AbstractWALRoller.java   | 50 +++++++++++++++----
 1 file changed, 40 insertions(+), 10 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
index 3ad1c5cd17ca..c5dd7a6c4641 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
@@ -60,6 +60,10 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
   protected static final String WAL_ROLL_PERIOD_KEY = "hbase.regionserver.logroll.period";
 
+  protected static final String WAL_ROLL_WAIT_TIMEOUT = "hbase.regionserver.logroll.wait.timeout.ms";
+
+  protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries";
+
   protected final ConcurrentMap<WAL, RollController> wals = new ConcurrentHashMap<>();
   protected final T abortable;
   // Period to roll log.
@@ -67,6 +71,10 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
   private final int threadWakeFrequency;
   // The interval to check low replication on hlog's pipeline
   private final long checkLowReplicationInterval;
+  // Wait period for roll log
+  private final long rollWaitTimeout;
+  // Max retry for roll log
+  private final int maxRollRetry;
 
   private volatile boolean running = true;
 
@@ -114,6 +122,8 @@ protected AbstractWALRoller(String name, Configuration conf, T abortable) {
     this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
     this.checkLowReplicationInterval =
       conf.getLong("hbase.regionserver.hlog.check.lowreplication.interval", 30 * 1000);
+    this.rollWaitTimeout = conf.getLong(WAL_ROLL_WAIT_TIMEOUT, 30000);
+    this.maxRollRetry = conf.getInt(WAL_ROLL_RETRIES, 2);
   }
 
   /**
@@ -184,18 +194,38 @@ public void run() {
         } else {
           continue;
         }
-        try {
-          // Force the roll if the logroll.period is elapsed or if a roll was requested.
-          // The returned value is an collection of actual region and family names.
-          Map<byte[], List<byte[]>> regionsToFlush = controller.rollWal(now);
-          if (regionsToFlush != null) {
-            for (Map.Entry<byte[], List<byte[]>> r : regionsToFlush.entrySet()) {
-              scheduleFlush(Bytes.toString(r.getKey()), r.getValue());
+        Map<byte[], List<byte[]>> regionsToFlush = null;
+        int nAttempts = 0;
+        long startWaiting = EnvironmentEdgeManager.currentTime();
+        do {
+          try {
+            // Force the roll if the logroll.period is elapsed or if a roll was requested.
+            // The returned value is a collection of actual region and family names.
+            regionsToFlush = controller.rollWal(EnvironmentEdgeManager.currentTime());
+            break;
+          } catch (IOException ioe) {
+            if (ioe instanceof WALClosedException) {
+              LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", ioe);
+              iter.remove();
+              break;
             }
+            long waitingTime = EnvironmentEdgeManager.currentTime() - startWaiting;
+            if (waitingTime < rollWaitTimeout && nAttempts < maxRollRetry) {
+              nAttempts++;
+              LOG.warn("Retry to roll log, nAttempts={}, waiting time={}ms, sleeping 1s to retry,"
+                + " last exception= {}", nAttempts, waitingTime,
+                ioe.getCause().getClass().getSimpleName());
+              sleep(1000);
+            } else {
+              LOG.error("Roll wal failed and waiting timeout, will not retry", ioe);
+              throw ioe;
+            }
+          }
+        } while (EnvironmentEdgeManager.currentTime() - startWaiting < rollWaitTimeout);
+        if (regionsToFlush != null) {
+          for (Map.Entry<byte[], List<byte[]>> r : regionsToFlush.entrySet()) {
+            scheduleFlush(Bytes.toString(r.getKey()), r.getValue());
           }
-        } catch (WALClosedException e) {
-          LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", e);
-          iter.remove();
         }
       }
     } catch (FailedLogCloseException | ConnectException e) {

From 34b603454615d0c2ac49fde9bbbdad04d4cc6d8e Mon Sep 17 00:00:00 2001
From: haxiaolin
Date: Thu, 17 Feb 2022 13:56:59 +0800
Subject: [PATCH 2/2] update the default value, and add some explanation

---
 .../apache/hadoop/hbase/wal/AbstractWALRoller.java | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
index c5dd7a6c4641..fb9f3de91a35 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
@@ -60,8 +60,16 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
   protected static final String WAL_ROLL_PERIOD_KEY = "hbase.regionserver.logroll.period";
 
+  /**
+   * Configuration key for the timeout of log rolling retry.
+   */
   protected static final String WAL_ROLL_WAIT_TIMEOUT = "hbase.regionserver.logroll.wait.timeout.ms";
 
+  /**
+   * Configuration key for the max count of log rolling retries.
+   * The real retry count is also limited by the timeout of log rolling
+   * via {@link #WAL_ROLL_WAIT_TIMEOUT}.
+   */
   protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries";
 
   protected final ConcurrentMap<WAL, RollController> wals = new ConcurrentHashMap<>();
@@ -123,7 +131,8 @@ protected AbstractWALRoller(String name, Configuration conf, T abortable) {
     this.checkLowReplicationInterval =
       conf.getLong("hbase.regionserver.hlog.check.lowreplication.interval", 30 * 1000);
     this.rollWaitTimeout = conf.getLong(WAL_ROLL_WAIT_TIMEOUT, 30000);
-    this.maxRollRetry = conf.getInt(WAL_ROLL_RETRIES, 2);
+    // retry rolling does not have to be the default behavior, so the default value is 0 here
+    this.maxRollRetry = conf.getInt(WAL_ROLL_RETRIES, 0);
   }
 
   /**
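
A minimal sketch (not part of the patch) of how the two new properties could be set on a Configuration, for example in a unit test: the property keys come from the constants added above, while the class name and the chosen values are only illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollRetryConfigSketch {
  // Illustrative only: builds a Configuration that opts in to WAL roll retries.
  public static Configuration withWalRollRetries() {
    Configuration conf = HBaseConfiguration.create();
    // Allow up to 2 retries of a failed WAL roll; the patch defaults this to 0 (no retry).
    conf.setInt("hbase.regionserver.logroll.retries", 2);
    // Cap the total time spent waiting and retrying a single roll at 30 seconds (the default).
    conf.setLong("hbase.regionserver.logroll.wait.timeout.ms", 30000L);
    return conf;
  }
}

With hbase.regionserver.logroll.retries left at its default of 0, the roller keeps the current behavior and gives up on the first failed roll, so the retry path is strictly opt-in.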