@@ -104,7 +104,8 @@ static Status splitLog(String filename, CancelableProgressable p, Configuration
  // encountered a bad non-retry-able persistent error.
  try {
    SplitLogWorkerCoordination splitLogWorkerCoordination =
-     server.getCoordinatedStateManager().getSplitLogWorkerCoordination();
Contributor: Can splitLogWorkerCoordination be null, and if so, for what reason? Thanks.

Contributor (Author): splitLogWorkerCoordination is only present when we are doing the old ZooKeeper-based log splitting. Otherwise, it is null.
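For illustration only (not code from this PR): a minimal sketch of the null-safe lookup this implies, assuming the HBASE_SPLIT_WAL_COORDINATED_BY_ZK flag mentioned later in this thread lives in HConstants and that "server" and "conf" are the RegionServerServices and Configuration passed into splitLog.

// Illustrative sketch, not part of this change. With the default procedure-based
// WAL splitting there is no ZK coordination, so the lookup must tolerate null.
// Assumption: HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK is the config key.
boolean zkCoordinated = conf.getBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false);
SplitLogWorkerCoordination coordination =
  (zkCoordinated && server.getCoordinatedStateManager() != null)
    ? server.getCoordinatedStateManager().getSplitLogWorkerCoordination()
    : null; // null for the default (procedure-based) splitting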

Contributor: +1 on this fix. I hit this issue and found that if we set HBASE_SPLIT_WAL_COORDINATED_BY_ZK in TestClusterRestartFailoverSplitWithoutZk, the test fails.
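For reference, a hypothetical sketch (not taken from TestClusterRestartFailoverSplitWithoutZk) of how such a flag is toggled in a mini-cluster test; the constant's location in HConstants and the startMiniCluster call are assumptions, and the flag must be set before the cluster starts.

// Hypothetical sketch only: pick the split-coordination mode for the test cluster.
Configuration conf = TEST_UTIL.getConfiguration();
// false selects the procedure-based path, where splitLogWorkerCoordination is null
conf.setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false);
TEST_UTIL.startMiniCluster(3);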

+     server.getCoordinatedStateManager() == null ? null
+       : server.getCoordinatedStateManager().getSplitLogWorkerCoordination();
    if (!WALSplitter.splitLogFile(walDir, fs.getFileStatus(new Path(walDir, filename)), fs, conf,
      p, sequenceIdChecker, splitLogWorkerCoordination, factory, server)) {
      return Status.PREEMPTED;
@@ -197,8 +197,6 @@ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem w
    Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
    SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory,
    RegionServerServices rsServices) throws IOException {
-   Preconditions.checkNotNull(splitLogWorkerCoordination,
-     "Can't be null; needed to propagate WAL corruption if any found");
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem rootFS = rootDir.getFileSystem(conf);
    WALSplitter splitter = new WALSplitter(factory, conf, walDir, walFS, rootDir, rootFS, idChecker,
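The removed precondition existed so that WAL corruption could be reported back through the coordination; with it gone, code inside WALSplitter has to guard that callback itself. A minimal hedged sketch of the pattern, assuming a markCorrupted(Path, String, FileSystem) method on SplitLogWorkerCoordination (not verified against this PR):

// Hedged sketch only: report corruption through ZK coordination when present,
// and skip the callback for procedure-based splitting (coordination == null).
if (splitLogWorkerCoordination != null) {
  splitLogWorkerCoordination.markCorrupted(walDir, logfile.getPath().getName(), walFS);
}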
@@ -32,6 +32,9 @@
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.Cell;
+ import org.apache.hadoop.hbase.CellBuilderFactory;
+ import org.apache.hadoop.hbase.CellBuilderType;
  import org.apache.hadoop.hbase.HBaseClassTestRule;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -172,13 +175,17 @@ public void testWALEntryFilter() throws IOException {
      TEST_UTIL.waitFor(30000, () -> rs.getWalEntryFilter() != null);
      WALEntryFilter wef = rs.getWalEntryFilter();
      // Test non-system WAL edit.
+     WALEdit we = new WALEdit().add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).
+       setRow(HConstants.EMPTY_START_ROW).
+       setFamily(HConstants.CATALOG_FAMILY).
+       setType(Cell.Type.Put).build());
      WAL.Entry e = new WAL.Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY,
-       TableName.valueOf("test"), -1), new WALEdit());
+       TableName.valueOf("test"), -1, -1, uuid), we);
      assertTrue(wef.filter(e) == e);
      // Test system WAL edit.
      e = new WAL.Entry(
-       new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.META_TABLE_NAME, -1),
-       new WALEdit());
+       new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.META_TABLE_NAME, -1, -1, uuid),
+       we);
      assertNull(wef.filter(e));
    } finally {
      rs.terminate("Done");
@@ -383,6 +390,10 @@ protected void doStart() {
    protected void doStop() {
      notifyStopped();
    }
+
+   @Override public boolean canReplicateToSameCluster() {
+     return true;
+   }
  }

  /**