From cc712c272889effee521a33300106d1596a9d0c4 Mon Sep 17 00:00:00 2001
From: Mallikarjun
Date: Mon, 4 Oct 2021 16:37:36 +0530
Subject: [PATCH] add rsgroup support for backup

---
 .../hadoop/hbase/backup/BackupAdmin.java      |   2 +-
 .../hbase/backup/impl/BackupAdminImpl.java    |   4 +-
 .../hbase/backup/impl/BackupCommands.java     |   8 +-
 .../backup/impl/IncrementalBackupManager.java |  38 ++-
 .../hbase/backup/master/BackupLogCleaner.java |  23 +-
 .../hadoop/hbase/backup/TestBackupBase.java   |  92 ++++++-
 .../hadoop/hbase/backup/TestBackupDelete.java |   6 +-
 .../hbase/backup/TestBackupDeleteRestore.java |   2 +-
 .../backup/TestBackupDeleteWithFailures.java  |   2 +-
 .../hbase/backup/TestBackupDescribe.java      |   2 +-
 .../TestBackupLogCleanerWithRsgroup.java      | 122 +++++++++
 .../hadoop/hbase/backup/TestBackupMerge.java  |   6 +-
 .../backup/TestBackupMultipleDeletes.java     |  14 +-
 .../hbase/backup/TestBackupShowHistory.java   |   4 +-
 .../backup/TestBackupStatusProgress.java      |   4 +-
 .../hadoop/hbase/backup/TestFullRestore.java  |  26 +-
 .../hbase/backup/TestIncrementalBackup.java   |  15 +-
 .../TestIncrementalBackupDeleteTable.java     |   4 +-
 ...estIncrementalBackupMergeWithFailures.java |   6 +-
 .../TestIncrementalBackupWithBulkLoad.java    |   8 +-
 .../TestIncrementalBackupWithFailures.java    |   2 +-
 .../TestIncrementalBackupWithRsgroup.java     | 231 ++++++++++++++++++
 .../hadoop/hbase/backup/TestRemoteBackup.java |   5 +-
 .../hbase/backup/TestRemoteRestore.java       |   4 +-
 .../backup/TestRepairAfterFailedDelete.java   |   2 +-
 .../backup/TestRestoreBoundaryTests.java      |   4 +-
 .../backup/master/TestBackupLogCleaner.java   |   4 +-
 .../hadoop/hbase/rsgroup/RSGroupInfo.java     |   5 +-
 .../hbase/IntegrationTestBackupRestore.java   |   2 +-
 29 files changed, 564 insertions(+), 83 deletions(-)
 create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java
 create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithRsgroup.java

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
index ff1e13f79594..b8318ff40216 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
@@ -44,7 +44,7 @@ public interface BackupAdmin extends Closeable {
    * @return the backup Id
    */
-  String backupTables(final BackupRequest userRequest) throws IOException;
+  BackupInfo backupTables(final BackupRequest userRequest) throws IOException;
 
   /**
    * Restore backup
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
index 0d20f37def6c..c2db709cc612 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -521,7 +521,7 @@ public void restore(RestoreRequest request) throws IOException {
   }
 
   @Override
-  public String backupTables(BackupRequest request) throws IOException {
+  public BackupInfo backupTables(BackupRequest request) throws IOException {
     BackupType type = request.getBackupType();
     String targetRootDir = request.getTargetRootDir();
     List<TableName> tableList = request.getTableList();
@@ -604,7 +604,7 @@ public String backupTables(BackupRequest request) throws IOException {
 
     client.execute();
 
-    return backupId;
+    return client.backupInfo;
   }
 
   private List<TableName> excludeNonExistingTables(List<TableName> tableList,
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index b0a29e257b07..8b1a19daeee8 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -40,11 +40,9 @@
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-
 import java.io.IOException;
 import java.net.URI;
 import java.util.List;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -66,7 +64,6 @@
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
@@ -345,8 +342,9 @@ public void execute() throws IOException {
           tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
           .withTargetRootDir(targetBackupDir).withTotalTasks(workers)
           .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
-      String backupId = admin.backupTables(request);
-      System.out.println("Backup session " + backupId + " finished. Status: SUCCESS");
+      BackupInfo backupInfo = admin.backupTables(request);
+      System.out
+        .println("Backup session " + backupInfo.getBackupId() + " finished. Status: SUCCESS");
     } catch (IOException e) {
       System.out.println("Backup session finished. Status: FAILURE");
       throw e;
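Note on the API change above: backupTables() now returns the whole BackupInfo rather than the backup id String, so callers gain access to metadata such as the backed-up WAL list. A minimal sketch (not part of the patch; names and the root dir are illustrative, and conf is assumed to be a valid cluster Configuration) of how an external caller adapts:

    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupAdmin admin = new BackupAdminImpl(conn)) {
      BackupRequest request = new BackupRequest.Builder().withBackupType(BackupType.FULL)
        .withTableList(Lists.newArrayList(TableName.valueOf("table1")))
        .withTargetRootDir("/backupUT").build();
      // Before this patch: String backupId = admin.backupTables(request);
      BackupInfo info = admin.backupTables(request);
      String backupId = info.getBackupId();               // id is now read from BackupInfo
      List<String> backedUpWALs = info.getIncrBackupFileList(); // extra metadata now available
    }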
Status: FAILURE"); throw e; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 847837f04424..16d4cca96fa7 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -21,8 +21,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -34,7 +37,9 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; @@ -94,13 +99,36 @@ public Map getIncrBackupLogFileMap() throws IOException { } newTimestamps = readRegionServerLastLogRollResult(); - logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); + logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode, + getParticipatingServerNames(backupInfo.getTables())); logList = excludeProcV2WALs(logList); backupInfo.setIncrBackupFileList(logList); return newTimestamps; } + private Set getParticipatingServerNames(Set tables) throws IOException { + Set
participatingServers = new HashSet<>(); + boolean flag = false; + for (TableName table : tables) { + RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table); + if (rsGroupInfo != null && !rsGroupInfo.getServers().isEmpty()) { + LOG.info("Participating servers for table {}, rsgroup Name: {} are: {}", table, + rsGroupInfo.getName(), rsGroupInfo.getServers()); + participatingServers.addAll(rsGroupInfo.getServers()); + } else { + LOG.warn( + "Rsgroup isn't available for table {}, all servers in the cluster will be participating ", + table); + flag = true; + } + } + + return flag ? + new HashSet<>() : + participatingServers.stream().map(a -> a.toString()).collect(Collectors.toSet()); + } + private List excludeProcV2WALs(List logList) { List list = new ArrayList<>(); for (int i=0; i < logList.size(); i++) { @@ -127,8 +155,8 @@ private List excludeProcV2WALs(List logList) { * @throws IOException exception */ private List getLogFilesForNewBackup(Map olderTimestamps, - Map newestTimestamps, Configuration conf, String savedStartCode) - throws IOException { + Map newestTimestamps, Configuration conf, String savedStartCode, + Set servers) throws IOException { LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps + "\n newestTimestamps: " + newestTimestamps); @@ -161,7 +189,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps, for (FileStatus rs : rss) { p = rs.getPath(); host = BackupUtils.parseHostNameFromLogFile(p); - if (host == null) { + if (host == null || (!servers.isEmpty() && !servers.contains(host))) { continue; } FileStatus[] logs; @@ -216,7 +244,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps, continue; } host = BackupUtils.parseHostFromOldLog(p); - if (host == null) { + if (host == null || (!servers.isEmpty() && !servers.contains(host))) { continue; } currentLogTS = BackupUtils.getCreationTime(p); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index 79404b34e6de..e3d1b31c3a9c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -22,8 +22,11 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -39,6 +42,7 @@ import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -85,6 +89,20 @@ private Map getServersToOldestBackupMapping(List back Map serverAddressToLastBackupMap = new HashMap<>(); Map tableNameBackupInfoMap = new HashMap<>(); + Set
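How the filter above behaves: getParticipatingServerNames() returns Address strings ("host:port") for the tables' RSGroup members, and getLogFilesForNewBackup() compares them against the host parsed from each WAL path; an empty set means no filtering. A standalone sketch of that predicate (the WAL path and host names below are hypothetical; Set, Path, and BackupUtils are assumed to be in scope):

    // servers: Address.toString() values for the tables' RSGroup members
    Set<String> servers =
      new HashSet<>(Arrays.asList("rs1.example.com:16020", "rs2.example.com:16020"));
    Path wal = new Path("/hbase/oldWALs/rs3.example.com%2C16020%2C1633346256001.1633346300000");
    String host = BackupUtils.parseHostNameFromLogFile(wal); // e.g. "rs3.example.com:16020"
    boolean skipped = host == null || (!servers.isEmpty() && !servers.contains(host));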
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index 79404b34e6de..e3d1b31c3a9c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -22,8 +22,11 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -39,6 +42,7 @@
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -85,6 +89,20 @@ private Map<Address, Long> getServersToOldestBackupMapping(List<BackupInfo> back
     Map<Address, Long> serverAddressToLastBackupMap = new HashMap<>();
 
     Map<TableName, Long> tableNameBackupInfoMap = new HashMap<>();
+    Set<Address> servers = new HashSet<>();
+    for (BackupInfo backupInfo : backups) {
+      for (TableName table : backupInfo.getTables()) {
+        RSGroupInfo rsGroupInfo = conn.getAdmin().getRSGroup(table);
+        if (rsGroupInfo != null && rsGroupInfo.getServers() != null
+          && !rsGroupInfo.getServers().isEmpty()) {
+          servers.addAll(rsGroupInfo.getServers());
+        } else {
+          servers.addAll(conn.getAdmin().getRegionServers().stream().map(s -> s.getAddress())
+            .collect(Collectors.toList()));
+        }
+      }
+    }
+
     for (BackupInfo backupInfo : backups) {
       for (TableName table : backupInfo.getTables()) {
         tableNameBackupInfoMap.putIfAbsent(table, backupInfo.getStartTs());
@@ -92,7 +110,10 @@ private Map<Address, Long> getServersToOldestBackupMapping(List<BackupInfo> back
           tableNameBackupInfoMap.put(table, backupInfo.getStartTs());
           for (Map.Entry<String, Long> entry : backupInfo.getTableSetTimestampMap().get(table)
             .entrySet()) {
-            serverAddressToLastBackupMap.put(Address.fromString(entry.getKey()), entry.getValue());
+            if (servers.contains(Address.fromString(entry.getKey()))) {
+              serverAddressToLastBackupMap
+                .put(Address.fromString(entry.getKey()), entry.getValue());
+            }
           }
         }
       }
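Both BackupLogCleaner and IncrementalBackupManager resolve a table's candidate servers the same way; a condensed sketch of that lookup with its whole-cluster fallback (variable names illustrative, only calls the patch itself uses):

    Admin admin = conn.getAdmin();
    RSGroupInfo info = admin.getRSGroup(table); // may be null, e.g. when rsgroups are disabled
    Set<Address> servers =
      (info != null && info.getServers() != null && !info.getServers().isEmpty())
        ? new HashSet<>(info.getServers())
        : admin.getRegionServers().stream().map(ServerName::getAddress)
            .collect(Collectors.toSet());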
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 8a06425d2224..131159dd367a 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -18,14 +18,17 @@
  */
 package org.apache.hadoop.hbase.backup;
 
+import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -58,6 +61,9 @@
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
+import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
 import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
@@ -85,6 +91,15 @@ public class TestBackupBase {
   protected static Configuration conf1;
   protected static Configuration conf2;
 
+  protected static final int RSGROUP_RS_NUM = 5;
+  protected static final int NUM_REGIONSERVERS = 3;
+  protected static final String RSGROUP_NAME = "rsgroup1";
+  protected static final String RSGROUP_NAMESPACE = "rsgroup_ns";
+  protected static final TableName RSGROUP_TABLE_1 =
+    TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table1");
+  protected static final TableName RSGROUP_TABLE_2 =
+    TableName.valueOf(RSGROUP_NAMESPACE + ":rsgroup_table2");
+
   protected static TableName table1 = TableName.valueOf("table1");
   protected static TableDescriptor table1Desc;
   protected static TableName table2 = TableName.valueOf("table2");
@@ -106,6 +121,7 @@ public class TestBackupBase {
 
   protected static boolean autoRestoreOnFailure;
   protected static boolean useSecondCluster;
+  protected static boolean enableRSgroup;
 
   static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
     public IncrementalTableBackupClientForTest() {
@@ -260,6 +276,22 @@ public void execute() throws IOException {
     }
   }
 
+  private static RSGroupInfo addGroup(String groupName, int serverCount) throws IOException {
+    Admin admin = TEST_UTIL.getAdmin();
+    RSGroupInfo defaultInfo = admin.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
+    admin.addRSGroup(groupName);
+    Set<Address> set = new HashSet<>();
+    for (Address server : defaultInfo.getServers()) {
+      if (set.size() == serverCount) {
+        break;
+      }
+      set.add(server);
+    }
+    admin.moveServersToRSGroup(set, groupName);
+    RSGroupInfo result = admin.getRSGroup(groupName);
+    return result;
+  }
+
   public static void setUpHelper() throws Exception {
     BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
     BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
@@ -282,7 +314,13 @@ public static void setUpHelper() throws Exception {
     // Set MultiWAL (with 2 default WAL files per RS)
     conf1.set(WALFactory.WAL_PROVIDER, provider);
 
-    TEST_UTIL.startMiniCluster();
+    if (enableRSgroup) {
+      conf1.setBoolean(RSGroupUtil.RS_GROUP_ENABLED, true);
+      TEST_UTIL.startMiniCluster(RSGROUP_RS_NUM + NUM_REGIONSERVERS);
+      addGroup(RSGROUP_NAME, RSGROUP_RS_NUM);
+    } else {
+      TEST_UTIL.startMiniCluster();
+    }
 
     if (useSecondCluster) {
       conf2 = HBaseConfiguration.create(conf1);
@@ -322,6 +360,7 @@ public static void setUpHelper() throws Exception {
   public static void setUp() throws Exception {
     TEST_UTIL = new HBaseTestingUtil();
     conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = false;
     autoRestoreOnFailure = true;
     useSecondCluster = false;
     setUpHelper();
@@ -351,6 +390,7 @@ public static void tearDown() throws Exception {
     }
     TEST_UTIL.shutdownMiniCluster();
     TEST_UTIL.shutdownMiniMapReduceCluster();
+    enableRSgroup = false;
     autoRestoreOnFailure = true;
     useSecondCluster = false;
   }
@@ -376,16 +416,16 @@ protected BackupRequest createBackupRequest(BackupType type,
     return request;
   }
 
-  protected String backupTables(BackupType type, List<TableName> tables, String path)
+  protected BackupInfo backupTables(BackupType type, List<TableName> tables, String path)
     throws IOException {
     Connection conn = null;
     BackupAdmin badmin = null;
-    String backupId;
+    BackupInfo backupInfo;
     try {
       conn = ConnectionFactory.createConnection(conf1);
       badmin = new BackupAdminImpl(conn);
       BackupRequest request = createBackupRequest(type, tables, path);
-      backupId = badmin.backupTables(request);
+      backupInfo = badmin.backupTables(request);
     } finally {
       if (badmin != null) {
         badmin.close();
@@ -394,14 +434,14 @@ protected String backupTables(BackupType type, List<TableName> tables, String pa
         conn.close();
       }
     }
-    return backupId;
+    return backupInfo;
   }
 
-  protected String fullTableBackup(List<TableName> tables) throws IOException {
+  protected BackupInfo fullTableBackup(List<TableName> tables) throws IOException {
     return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
   }
 
-  protected String incrementalTableBackup(List<TableName> tables) throws IOException {
+  protected BackupInfo incrementalTableBackup(List<TableName> tables) throws IOException {
     return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
   }
 
@@ -449,6 +489,23 @@ protected static void createTables() throws Exception {
     table.close();
     ha.close();
     conn.close();
+
+    if (enableRSgroup) {
+      ha.createNamespace(NamespaceDescriptor.create(RSGROUP_NAMESPACE)
+        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, RSGROUP_NAME).build());
+
+      ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+      table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_1);
+      loadTable(table);
+      table.close();
+
+      ha.createTable(TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_2)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build());
+      table = ConnectionFactory.createConnection(conf1).getTable(RSGROUP_TABLE_2);
+      loadTable(table);
+      table.close();
+    }
   }
 
   protected boolean checkSucceeded(String backupId) throws IOException {
@@ -471,7 +528,7 @@ protected boolean checkFailed(String backupId) throws IOException {
     return status.getState() == BackupState.FAILED;
   }
 
-  private BackupInfo getBackupInfo(String backupId) throws IOException {
+  protected BackupInfo getBackupInfo(String backupId) throws IOException {
     try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
       BackupInfo status = table.readBackupInfo(backupId);
       return status;
@@ -508,6 +565,25 @@ protected List<FileStatus> getListOfWALFiles(Configuration c) throws IOException
     return logFiles;
   }
 
+  protected Set<Address> getRsgroupServers(String rsgroupName) throws IOException {
+    RSGroupInfo rsGroupInfo = TEST_UTIL.getAdmin().getRSGroup(rsgroupName);
+    if (rsGroupInfo != null && rsGroupInfo.getServers() != null
+      && !rsGroupInfo.getServers().isEmpty()) {
+      return new HashSet<>(rsGroupInfo.getServers());
+    }
+    return new HashSet<>();
+  }
+
+  protected void checkIfWALFilesBelongToRsgroup(List<String> walFiles, String rsgroupName)
+    throws IOException {
+    for (String file : walFiles) {
+      Address walServerAddress =
+        Address.fromString(BackupUtils.parseHostNameFromLogFile(new Path(file)));
+      assertTrue("Backed-up WAL files should be from RSGroup " + rsgroupName,
+        getRsgroupServers(rsgroupName).contains(walServerAddress));
+    }
+  }
+
   protected void dumpBackupDir() throws IOException {
     // Dump Backup Dir
     FileSystem fs = FileSystem.get(conf1);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
index bc8b346175a6..4d41e8cdad40 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
@@ -59,7 +59,7 @@ public class TestBackupDelete extends TestBackupBase {
   public void testBackupDelete() throws Exception {
     LOG.info("test backup delete on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
@@ -87,7 +87,7 @@ public void testBackupDelete() throws Exception {
   public void testBackupDeleteCommand() throws Exception {
     LOG.info("test backup delete on a single table with data: command-line");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -119,7 +119,7 @@ public long currentTime() {
         return System.currentTimeMillis() - 2 * 24 * 3600 * 1000;
       }
     });
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     EnvironmentEdgeManager.reset();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index f649b921b272..7b0527211df3 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -57,7 +57,7 @@ public void testBackupDeleteRestore() throws Exception {
     LOG.info("test full restore on a single table empty table");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     int numRows = TEST_UTIL.countRows(table1);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
index 2ab6f55f5b06..bdcc2273c2eb 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
@@ -144,7 +144,7 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures)
     throws Exception {
     LOG.info("test repair backup delete on a single table with data and failures " + failures[0]);
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
index 6ab3d04feff4..cee5b60fbd5e 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -83,7 +83,7 @@ public void testBackupDescribeCommand() throws Exception {
     LOG.info("test backup describe on a single table with data: command-line");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java
new file mode 100644
index 000000000000..771adac27c6e
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleanerWithRsgroup.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(MediumTests.class)
+public class TestBackupLogCleanerWithRsgroup extends TestBackupBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBackupLogCleanerWithRsgroup.class);
+
+  private static final Logger LOG =
+    LoggerFactory.getLogger(TestBackupLogCleanerWithRsgroup.class);
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL = new HBaseTestingUtil();
+    conf1 = TEST_UTIL.getConfiguration();
+    enableRSgroup = true;
+    autoRestoreOnFailure = true;
+    useSecondCluster = false;
+    setUpHelper();
+  }
+
+  @Test
+  public void testBackupLogCleanerRsgroup() throws Exception {
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+    List<TableName> tableSetFullList = Lists.newArrayList(RSGROUP_TABLE_1);
+
+    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+      // Verify that we have no backup sessions yet
+      assertFalse(systemTable.hasBackupSessions());
+
+      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      BackupLogCleaner cleaner = new BackupLogCleaner();
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+      Map<String, Object> params = new HashMap<>();
+      params.put(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster());
+      cleaner.init(params);
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+
+      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete all files because no backup sessions have been recorded yet
+      assertTrue(Iterables.size(deletable) == walFiles.size());
+
+      String backupIdFull = fullTableBackup(tableSetFullList).getBackupId();
+      assertTrue(checkSucceeded(backupIdFull));
+
+      // Check one more time
+      deletable = cleaner.getDeletableFiles(walFiles);
+      assertTrue(Iterables.size(deletable) == walFiles.size());
+
+      Connection conn = ConnectionFactory.createConnection(conf1);
+      // #2 - insert some data to table
+      Table t1 = conn.getTable(RSGROUP_TABLE_1);
+      Put p1;
+      Random rnd = new Random();
+      for (int i = 0; i < 5000; i++) {
+        p1 = new Put(Bytes.toBytes(1000000 + rnd.nextInt(9000000)));
+        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t1.put(p1);
+      }
+      t1.close();
+
+      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      // The new list of WAL files is greater than the previous one,
+      // because a new WAL per RS has been opened after the full backup
+      assertTrue(walFiles.size() < newWalFiles.size());
+
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+      assertTrue(newWalFiles.size() > Iterables.size(deletable));
+
+      // #3 - incremental backup
+      List<TableName> tableSetIncList = Lists.newArrayList(RSGROUP_TABLE_1);
+      String backupIdIncMultiple =
+        backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR).getBackupId();
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+      assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+      conn.close();
+    }
+  }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
index 1a8638c3b7dc..d29e5ad05d4b 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
@@ -66,7 +66,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -87,7 +87,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
@@ -99,7 +99,7 @@ public void TestIncBackupMergeRestore() throws Exception {
 
     // #3 - incremental backup for multiple tables
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index 538488b4c4e4..8251db005978 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -63,7 +63,7 @@ public void testBackupMultipleDeletes() throws Exception {
     Admin admin = conn.getAdmin();
     BackupAdmin client = new BackupAdminImpl(conn);
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdFull));
     // #2 - insert some data to table table1
     Table t1 = conn.getTable(table1);
@@ -78,7 +78,7 @@ public void testBackupMultipleDeletes() throws Exception {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc1 = client.backupTables(request);
+    String backupIdInc1 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc1));
     // #4 - insert some data to table table2
     Table t2 = conn.getTable(table2);
@@ -91,7 +91,7 @@ public void testBackupMultipleDeletes() throws Exception {
     // #5 - incremental backup for table1, table2
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc2 = client.backupTables(request);
+    String backupIdInc2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc2));
     // #6 - insert some data to table table1
     t1 = conn.getTable(table1);
@@ -103,7 +103,7 @@ public void testBackupMultipleDeletes() throws Exception {
     // #7 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc3 = client.backupTables(request);
+    String backupIdInc3 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc3));
     // #8 - insert some data to table table2
     t2 = conn.getTable(table2);
@@ -115,17 +115,17 @@ public void testBackupMultipleDeletes() throws Exception {
     // #9 - incremental backup for table1, table2
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc4 = client.backupTables(request);
+    String backupIdInc4 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc4));
     // #10 full backup for table3
     tables = Lists.newArrayList(table3);
     request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull2 = client.backupTables(request);
+    String backupIdFull2 = client.backupTables(request).getBackupId();
    assertTrue(checkSucceeded(backupIdFull2));
     // #11 - incremental backup for table3
     tables = Lists.newArrayList(table3);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdInc5 = client.backupTables(request);
+    String backupIdInc5 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdInc5));
     LOG.error("Delete backupIdInc2");
     client.deleteBackups(new String[] { backupIdInc2 });
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
index 4526070106d1..29a598e02b44 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
@@ -69,7 +69,7 @@ public void testBackupHistory() throws Exception {
     LOG.info("test backup history on a single table with data");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
 
@@ -93,7 +93,7 @@ public void testBackupHistory() throws Exception {
     assertTrue(output.indexOf(backupId) > 0);
 
     tableList = Lists.newArrayList(table2);
-    String backupId2 = fullTableBackup(tableList);
+    String backupId2 = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId2));
     LOG.info("backup complete: " + table2);
     BackupInfo.Filter tableNameFilter = image -> {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
index 6d2091ea697c..12d72cd7edae 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
@@ -54,7 +54,7 @@ public void testBackupStatusProgress() throws Exception {
     LOG.info("test backup status/progress on a single table with data");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
 
@@ -71,7 +71,7 @@ public void testBackupStatusProgressCommand() throws Exception {
     LOG.info("test backup status/progress on a single table with data: command-line");
 
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index f5ad0d7b827e..21fecc342306 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -56,7 +56,7 @@ public void testFullRestoreSingle() throws Exception {
     LOG.info("test full restore on a single table empty table");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -77,7 +77,7 @@ public void testFullRestoreSingleCommand() throws Exception {
     LOG.info("test full restore on a single table empty table: command-line");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -99,7 +99,7 @@ public void testFullRestoreCheckCommand() throws Exception {
     LOG.info("test full restore on a single table: command-line, check only");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     LOG.info("backup complete");
     assertTrue(checkSucceeded(backupId));
     // restore <backup_root_path> <backup_id> <tables> [tableMapping]
@@ -123,7 +123,7 @@ public void testFullRestoreCheckCommand() throws Exception {
   public void testFullRestoreMultiple() throws Exception {
     LOG.info("create full backup image on multiple tables");
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -148,7 +148,7 @@ public void testFullRestoreMultiple() throws Exception {
   public void testFullRestoreMultipleCommand() throws Exception {
     LOG.info("create full backup image on multiple tables: command-line");
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -179,7 +179,7 @@ public void testFullRestoreMultipleCommand() throws Exception {
   public void testFullRestoreSingleOverwrite() throws Exception {
     LOG.info("test full restore on a single table empty table");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -199,7 +199,7 @@ public void testFullRestoreSingleOverwrite() throws Exception {
   public void testFullRestoreSingleOverwriteCommand() throws Exception {
     LOG.info("test full restore on a single table empty table: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
@@ -225,7 +225,7 @@ public void testFullRestoreMultipleOverwrite() throws Exception {
     LOG.info("create full backup image on multiple tables");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -244,7 +244,7 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception {
     LOG.info("create full backup image on multiple tables: command-line");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
@@ -271,7 +271,7 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception {
   public void testFullRestoreSingleDNE() throws Exception {
     LOG.info("test restore fails on a single table that does not exist");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -292,7 +292,7 @@ public void testFullRestoreSingleDNE() throws Exception {
   public void testFullRestoreSingleDNECommand() throws Exception {
     LOG.info("test restore fails on a single table that does not exist: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete");
@@ -317,7 +317,7 @@ public void testFullRestoreMultipleDNE() throws Exception {
     LOG.info("test restore fails on multiple tables that do not exist");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset =
@@ -338,7 +338,7 @@ public void testFullRestoreMultipleDNECommand() throws Exception {
     LOG.info("test restore fails on multiple tables that do not exist: command-line");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     TableName[] restore_tableset =
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index ea552b7945a3..334473498797 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.backup;
 
 import static org.junit.Assert.assertTrue;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -37,6 +36,7 @@
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -48,7 +48,6 @@
 import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
@@ -97,7 +96,7 @@ public void TestIncBackupRestore() throws Exception {
       Admin admin = conn.getAdmin();
       BackupAdminImpl client = new BackupAdminImpl(conn);
       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();
       assertTrue(checkSucceeded(backupIdFull));
 
       // #2 - insert some data to table
@@ -145,8 +144,11 @@ public void TestIncBackupRestore() throws Exception {
       // #3 - incremental backup for multiple tables
       tables = Lists.newArrayList(table1, table2);
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple = client.backupTables(request);
+      BackupInfo backupInfoIncMultiple = client.backupTables(request);
+      String backupIdIncMultiple = backupInfoIncMultiple.getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(),
+        RSGroupInfo.DEFAULT_GROUP);
 
       // add column family f2 to table1
       // drop column family f3
@@ -165,8 +167,11 @@ public void TestIncBackupRestore() throws Exception {
 
       // #4 - additional incremental backup for multiple tables
       request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-      String backupIdIncMultiple2 = client.backupTables(request);
+      BackupInfo backupInfoIncMultiple2 = client.backupTables(request);
+      String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId();
       assertTrue(checkSucceeded(backupIdIncMultiple2));
+      checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(),
+        RSGroupInfo.DEFAULT_GROUP);
 
       // #5 - restore full backup for all tables
       TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index 837de4dd6166..537435b436fd 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -69,7 +69,7 @@ public void testIncBackupDeleteTable() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -92,7 +92,7 @@ public void testIncBackupDeleteTable() throws Exception {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // #4 - restore full backup for all tables, without overwrite
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 1bde63ba5527..47cca6ed8954 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -239,7 +239,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -260,7 +260,7 @@ public void TestIncBackupMergeRestore() throws Exception {
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
@@ -272,7 +272,7 @@ public void TestIncBackupMergeRestore() throws Exception {
 
     // #3 - incremental backup for multiple tables
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     // #4 Merge backup images with failures
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index 60aa635045a7..e9225c5930cd 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -74,7 +74,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
+    String backupIdFull = client.backupTables(request).getBackupId();
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -101,7 +101,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
     // #4 bulk load again
     LOG.debug("bulk loading into " + testName);
@@ -114,7 +114,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     // #5 - incremental backup for table1
     tables = Lists.newArrayList(table1);
     request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple1 = client.backupTables(request);
+    String backupIdIncMultiple1 = client.backupTables(request).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple1));
     // Delete all data in table1
     TEST_UTIL.deleteTableData(table1);
@@ -131,7 +131,7 @@ public void TestIncBackupDeleteTable() throws Exception {
     Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1);
 
     request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    backupIdFull = client.backupTables(request);
+    backupIdFull = client.backupTables(request).getBackupId();
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>,
         List<byte[]>> pair = table.readBulkloadRows(tables);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
index 00b13ba8dbf8..a0d44c1a4bc2 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
@@ -95,7 +95,7 @@ public void testIncBackupRestore() throws Exception {
       BackupAdminImpl client = new BackupAdminImpl(conn);
 
       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-      String backupIdFull = client.backupTables(request);
+      String backupIdFull = client.backupTables(request).getBackupId();
 
       assertTrue(checkSucceeded(backupIdFull));
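The rsgroup tests below rely on the TestBackupBase wiring added earlier in this patch: a dedicated group is populated with servers taken from the default group, and a namespace is pinned to it so that rsgroup_ns tables land only on that group's servers. A condensed sketch of that setup, using only calls that appear in the patch (literals and the serversToMove variable are illustrative):

    admin.addRSGroup("rsgroup1");
    admin.moveServersToRSGroup(serversToMove, "rsgroup1"); // Set<Address> from the default group
    admin.createNamespace(NamespaceDescriptor.create("rsgroup_ns")
      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "rsgroup1").build());
    admin.createTable(TableDescriptorBuilder
      .newBuilder(TableName.valueOf("rsgroup_ns:rsgroup_table1"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f"))).build());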
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.SingleProcessHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +@Category(LargeTests.class) @RunWith(Parameterized.class) +public class TestIncrementalBackupWithRsgroup extends TestBackupBase { + + @ClassRule public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestIncrementalBackupWithRsgroup.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupWithRsgroup.class); + + public TestIncrementalBackupWithRsgroup(Boolean b) { + } + + @Parameterized.Parameters public static Collection data() { + List params = new ArrayList<>(); + params.add(new Object[] { Boolean.TRUE }); + return params; + } + + @BeforeClass public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtil(); + conf1 = TEST_UTIL.getConfiguration(); + enableRSgroup = true; + autoRestoreOnFailure = true; + useSecondCluster = false; + setUpHelper(); + } + + // implement all test cases in 1 test since incremental + // backup/restore has dependencies + @Test public void TestIncBackupRestore() throws Exception { + int ADD_ROWS = 99; + + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + List tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2); + final byte[] fam3Name = Bytes.toBytes("f3"); + final byte[] mobName = Bytes.toBytes("mob"); + + TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_1) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true).setMobThreshold(5L) + .build()).build(); + TEST_UTIL.getAdmin().modifyTable(newTable1Desc); + + try (Connection conn = ConnectionFactory.createConnection(conf1)) { + int NB_ROWS_FAM3 = 6; + insertIntoTable(conn, RSGROUP_TABLE_1, fam3Name, 3, NB_ROWS_FAM3).close(); + insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_FAM3).close(); + Admin admin = conn.getAdmin(); + BackupAdminImpl client = new BackupAdminImpl(conn); + BackupRequest 
request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); + String backupIdFull = client.backupTables(request).getBackupId(); + assertTrue(checkSucceeded(backupIdFull)); + + // #2 - insert some data to table + Table t1 = insertIntoTable(conn, RSGROUP_TABLE_1, famName, 1, ADD_ROWS); + LOG.debug("writing " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1); + Assert + .assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); + LOG.debug("written " + ADD_ROWS + " rows to " + RSGROUP_TABLE_1); + // additionally, insert rows to MOB cf + int NB_ROWS_MOB = 111; + insertIntoTable(conn, RSGROUP_TABLE_1, mobName, 3, NB_ROWS_MOB); + LOG.debug("written " + NB_ROWS_MOB + " rows to " + RSGROUP_TABLE_1 + " to Mob enabled CF"); + t1.close(); + Assert + .assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB); + Table t2 = conn.getTable(RSGROUP_TABLE_2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2)); + t2.close(); + LOG.debug("written " + 5 + " rows to " + RSGROUP_TABLE_2); + // split RSGROUP_TABLE_1 + SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + List regions = cluster.getRegions(RSGROUP_TABLE_1); + byte[] name = regions.get(0).getRegionInfo().getRegionName(); + long startSplitTime = EnvironmentEdgeManager.currentTime(); + try { + admin.splitRegionAsync(name).get(); + } catch (Exception e) { + // although split fail, this may not affect following check in current API, + // exception will be thrown. + LOG.debug("region is not splittable, because " + e); + } + while (!admin.isTableAvailable(RSGROUP_TABLE_1)) { + Thread.sleep(100); + } + long endSplitTime = EnvironmentEdgeManager.currentTime(); + // split finished + LOG.debug("split finished in =" + (endSplitTime - startSplitTime)); + + // #3 - incremental backup for multiple tables + tables = Lists.newArrayList(RSGROUP_TABLE_1, RSGROUP_TABLE_2); + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + BackupInfo backupInfoIncMultiple = client.backupTables(request); + String backupIdIncMultiple = backupInfoIncMultiple.getBackupId(); + assertTrue(checkSucceeded(backupIdIncMultiple)); + checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple.getIncrBackupFileList(), RSGROUP_NAME); + + // add column family f2 to RSGROUP_TABLE_1 + // drop column family f3 + final byte[] fam2Name = Bytes.toBytes("f2"); + newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name) + .build(); + TEST_UTIL.getAdmin().modifyTable(newTable1Desc); + + int NB_ROWS_FAM2 = 7; + Table t3 = insertIntoTable(conn, RSGROUP_TABLE_1, fam2Name, 2, NB_ROWS_FAM2); + t3.close(); + + // Wait for 5 sec to make sure that old WALs were deleted + Thread.sleep(5000); + + // #4 - additional incremental backup for multiple tables + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + BackupInfo backupInfoIncMultiple2 = client.backupTables(request); + String backupIdIncMultiple2 = backupInfoIncMultiple2.getBackupId(); + assertTrue(checkSucceeded(backupIdIncMultiple2)); + checkIfWALFilesBelongToRsgroup(backupInfoIncMultiple2.getIncrBackupFileList(), RSGROUP_NAME); + + // #5 - restore full backup for all tables + TableName[] tablesRestoreFull = new TableName[] { RSGROUP_TABLE_1, 
+
+      // #5 - restore full backup for all tables
+      TableName[] tablesRestoreFull = new TableName[] { RSGROUP_TABLE_1,
+        RSGROUP_TABLE_2 };
+      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
+
+      LOG.debug("Restoring full " + backupIdFull);
+      client.restore(BackupUtils
+        .createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull,
+          tablesMapFull, true));
+
+      // #6.1 - check tables for full restore
+      Admin hAdmin = TEST_UTIL.getAdmin();
+      assertTrue(hAdmin.tableExists(table1_restore));
+      assertTrue(hAdmin.tableExists(table2_restore));
+      hAdmin.close();
+
+      // #6.2 - checking row count of tables for full restore
+      Table hTable = conn.getTable(table1_restore);
+      Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+
+      // #7 - restore incremental backup for multiple tables, with overwrite
+      TableName[] tablesRestoreIncMultiple = new TableName[] { RSGROUP_TABLE_1, RSGROUP_TABLE_2 };
+      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
+        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+      hTable = conn.getTable(table1_restore);
+
+      LOG.debug("After incremental restore: " + hTable.getDescriptor());
+      int countFamName = TEST_UTIL.countRows(hTable, famName);
+      LOG.debug("f1 has " + countFamName + " rows");
+      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
+
+      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
+      LOG.debug("f2 has " + countFam2Name + " rows");
+      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
+
+      int countMobName = TEST_UTIL.countRows(hTable, mobName);
+      LOG.debug("mob has " + countMobName + " rows");
+      Assert.assertEquals(countMobName, NB_ROWS_MOB);
+      hTable.close();
+
+      hTable = conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
+      hTable.close();
+      admin.close();
+    }
+  }
+}
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index 4150d3fd2fc5..8a166e9c002d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.backup;
 
 import static org.junit.Assert.assertTrue;
-
 import java.io.IOException;
 import java.util.concurrent.CountDownLatch;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -45,7 +44,6 @@
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
@@ -118,7 +116,8 @@ public void testFullBackupRemote() throws Exception {
     latch.countDown();
 
     String backupId =
-      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     assertTrue(checkSucceeded(backupId));
 
     LOG.info("backup complete " + backupId);
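The String-to-BackupInfo migration visible in these test diffs is the caller-facing half of
the BackupAdmin change. A minimal caller-side sketch, assuming the patched interface and an
existing Configuration `conf`; the table name and target directory are placeholder values:

    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupAdmin admin = new BackupAdminImpl(conn)) {
      BackupRequest request = new BackupRequest.Builder()
        .withBackupType(BackupType.FULL)
        .withTableList(Lists.newArrayList(TableName.valueOf("t1")))  // placeholder table
        .withTargetRootDir("hdfs://backup-root")                     // placeholder root dir
        .build();
      BackupInfo info = admin.backupTables(request);
      String id = info.getBackupId();                   // what the old API returned
      List<String> wals = info.getIncrBackupFileList(); // what the rsgroup tests assert on
    }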
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
index 8dd4f7924703..9c271aa3a58b 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.backup;
 
 import static org.junit.Assert.assertTrue;
-
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.TableName;
@@ -63,7 +62,8 @@ public static void setUp() throws Exception {
   public void testFullRestoreRemote() throws Exception {
     LOG.info("test remote full backup on a single table");
     String backupId =
-      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
+      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR)
+        .getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
index 62a1f8f294cf..c5c20ed66278 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
@@ -50,7 +50,7 @@ public class TestRepairAfterFailedDelete extends TestBackupBase {
   public void testRepairBackupDelete() throws Exception {
     LOG.info("test repair backup delete on a single table with data");
     List<TableName> tableList = Lists.newArrayList(table1);
-    String backupId = fullTableBackup(tableList);
+    String backupId = fullTableBackup(tableList).getBackupId();
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     String[] backupIds = new String[] { backupId };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
index a6808cd69dc3..b5e3a28c0ae3 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -48,7 +48,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
   @Test
   public void testFullRestoreSingleEmpty() throws Exception {
     LOG.info("test full restore on a single empty table");
-    String backupId = fullTableBackup(toList(table1.getNameAsString()));
+    String backupId = fullTableBackup(toList(table1.getNameAsString())).getBackupId();
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
@@ -70,7 +70,7 @@ public void testFullRestoreMultipleEmpty() throws Exception {
     LOG.info("create full backup image on multiple tables");
     List<TableName> tables = toList(table2.getNameAsString(), table3.getNameAsString());
-    String backupId = fullTableBackup(tables);
+    String backupId = fullTableBackup(tables).getBackupId();
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
     getBackupAdmin().restore(
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
index 5363b1a44b4f..1cf47151d017 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -81,7 +81,7 @@ public void testBackupLogCleaner() throws Exception {
     // We can delete all files because no backup sessions have been recorded yet
     assertTrue(size == walFiles.size());
 
-    String backupIdFull = fullTableBackup(tableSetFullList);
+    String backupIdFull = fullTableBackup(tableSetFullList).getBackupId();
     assertTrue(checkSucceeded(backupIdFull));
 
     // Check one more time
     deletable = cleaner.getDeletableFiles(walFiles);
@@ -121,7 +121,7 @@ public void testBackupLogCleaner() throws Exception {
     List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
     String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
-      BACKUP_ROOT_DIR);
+      BACKUP_ROOT_DIR).getBackupId();
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     deletable = cleaner.getDeletableFiles(newWalFiles);
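The rsgroup tests above lean on checkIfWALFilesBelongToRsgroup(...), which by the diffstat
presumably lives in the enlarged TestBackupBase. A hedged sketch of what such a check can
look like, assuming WAL paths of the usual .../WALs/<host,port,startcode>/<wal-file> shape;
the helper body below is illustrative, not the patch's actual code:

    // Illustrative sketch only. Assumed imports: org.apache.hadoop.fs.Path,
    // org.apache.hadoop.hbase.ServerName, org.apache.hadoop.hbase.net.Address,
    // org.apache.hadoop.hbase.rsgroup.RSGroupInfo
    private void checkIfWALFilesBelongToRsgroup(List<String> walFiles, String groupName)
        throws IOException {
      RSGroupInfo group = TEST_UTIL.getAdmin().getRSGroup(groupName);
      for (String wal : walFiles) {
        // The parent directory of a WAL file is named after the server that wrote it.
        String serverDir = new Path(wal).getParent().getName();
        ServerName sn = ServerName.parseServerName(serverDir);
        Address addr = Address.fromParts(sn.getHostname(), sn.getPort());
        assertTrue("WAL " + wal + " does not belong to rsgroup " + groupName,
          group.containsServer(addr));
      }
    }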
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index bb4a4d7c6228..d903e5c04bc5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -22,6 +22,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.Objects;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -40,7 +41,7 @@ public class RSGroupInfo {
   private final String name;
   // Keep servers in a sorted set so it has an expected ordering when displayed.
-  private final SortedSet<Address> servers;
+  private final NavigableSet<Address> servers;
   // Keep tables sorted too.
 
   /**
@@ -111,7 +112,7 @@ public boolean containsServer(Address hostPort) {
   /**
    * Get list of servers.
    */
-  public SortedSet<Address> getServers() {
+  public NavigableSet<Address> getServers() {
     return servers;
   }
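Widening the getter from SortedSet to NavigableSet is source-compatible for existing callers
(NavigableSet extends SortedSet, and the backing field was already a TreeSet) while exposing
the navigation views without a downcast. A small illustrative sketch; the address below is a
placeholder:

    NavigableSet<Address> servers = rsGroupInfo.getServers();
    // Reverse iteration and nearest-match lookups no longer require casting to TreeSet:
    for (Address a : servers.descendingSet()) {
      LOG.debug("server in group (reverse order): " + a);
    }
    Address probe = Address.fromParts("rs1.example.com", 16020);  // placeholder address
    Address nearest = servers.floor(probe);                       // greatest member <= probe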
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
index f57b9b6ab587..6e848dc847e6 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -235,7 +235,7 @@ private void loadData(TableName table, int numRows) throws IOException {
   }
 
   private String backup(BackupRequest request, BackupAdmin client) throws IOException {
-    String backupId = client.backupTables(request);
+    String backupId = client.backupTables(request).getBackupId();
     return backupId;
   }
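Since backupTables(...) now hands back the whole BackupInfo, call sites like the one above
could also surface the session's final state instead of discarding everything but the id. An
optional, illustrative variant (getState() is an existing BackupInfo accessor; this is not
part of the patch):

    private String backup(BackupRequest request, BackupAdmin client) throws IOException {
      BackupInfo info = client.backupTables(request);
      // Log the completion state alongside the id rather than dropping the BackupInfo.
      LOG.info("backup " + info.getBackupId() + " finished in state " + info.getState());
      return info.getBackupId();
    }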