From 0e1db79f9acf722e69898dc5c426b29288f7827a Mon Sep 17 00:00:00 2001
From: mokai
Date: Mon, 6 Mar 2023 16:27:42 +0800
Subject: [PATCH 1/3] HBASE-27638 Get slow/large log response that matched the
 ‘CLIENT_IP' without client port
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../hbase/namequeues/LogHandlerUtils.java     | 14 ++++-
 .../namequeues/TestNamedQueueRecorder.java    | 54 +++++++++++++++++++
 .../shell/commands/get_largelog_responses.rb  |  6 +++
 .../shell/commands/get_slowlog_responses.rb   |  6 +++
 4 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/LogHandlerUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/LogHandlerUtils.java
index 1d7dd7ae686b..4cd18bf5a691 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/LogHandlerUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/LogHandlerUtils.java
@@ -20,6 +20,7 @@
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
@@ -68,7 +69,7 @@ private static List<TooSlowLog.SlowLogPayload> filterLogs(
       if (tableName != null && slowLogPayload.getRegionName().startsWith(tableName)) {
         totalFilterMatches++;
       }
-      if (slowLogPayload.getClientAddress().equals(clientAddress)) {
+      if (isClientAddressMatched(slowLogPayload, clientAddress)) {
         totalFilterMatches++;
       }
       if (slowLogPayload.getUserName().equals(userName)) {
@@ -92,6 +93,17 @@ private static List<TooSlowLog.SlowLogPayload> filterLogs(
     return filteredSlowLogPayloads;
   }
 
+  private static boolean isClientAddressMatched(TooSlowLog.SlowLogPayload slowLogPayload,
+    String clientAddress) {
+    String clientAddressInPayload = slowLogPayload.getClientAddress();
+    int portPos = clientAddressInPayload.lastIndexOf(Addressing.HOSTNAME_PORT_SEPARATOR);
+    if (portPos < 1) {
+      return clientAddressInPayload.equals(clientAddress);
+    }
+    return clientAddressInPayload.equals(clientAddress)
+      || clientAddressInPayload.substring(0, portPos).equals(clientAddress);
+  }
+
   public static List<TooSlowLog.SlowLogPayload> getFilteredLogs(
     AdminProtos.SlowLogResponseRequest request, List<TooSlowLog.SlowLogPayload> logPayloadList) {
     int totalFilters = getTotalFiltersCount(request);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java
index 05243bd93a6c..1cafc5bffac0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java
@@ -381,6 +381,60 @@ public void testSlowLogFilters() throws Exception {
       HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(requestSlowLog).size() == 15));
   }
 
+  @Test
+  public void testSlowLogFilterWithClientAddress() throws Exception {
+    Configuration conf = applySlowLogRecorderConf(10);
+    Constructor<NamedQueueRecorder> constructor =
+      NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class);
+    constructor.setAccessible(true);
+    namedQueueRecorder = constructor.newInstance(conf);
+    AdminProtos.SlowLogResponseRequest request =
+      AdminProtos.SlowLogResponseRequest.newBuilder().build();
+    Assert.assertEquals(getSlowLogPayloads(request).size(), 0);
+
+    String[] clientAddressArray = new String[] { "[127:1:1:1:1:1:1:1]:1", "[127:1:1:1:1:1:1:1]:2",
+      "[127:1:1:1:1:1:1:1]:3", "127.0.0.1:1", "127.0.0.1:2" };
+    boolean isSlowLog;
+    boolean isLargeLog;
+    for (int i = 0; i < 10; i++) {
+      if (i % 2 == 0) {
+        isSlowLog = true;
+        isLargeLog = false;
+      } else {
+        isSlowLog = false;
+        isLargeLog = true;
+      }
+      RpcLogDetails rpcLogDetails = getRpcLogDetails("userName_" + (i + 1),
+        clientAddressArray[i % 5], "class_" + (i + 1), isSlowLog, isLargeLog);
+      namedQueueRecorder.addRecord(rpcLogDetails);
+    }
+
+    AdminProtos.SlowLogResponseRequest largeLogRequestIPv6WithPort =
+      AdminProtos.SlowLogResponseRequest.newBuilder()
+        .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG)
+        .setClientAddress("[127:1:1:1:1:1:1:1]:2").build();
+    Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000,
+      () -> getSlowLogPayloads(largeLogRequestIPv6WithPort).size() == 1));
+    AdminProtos.SlowLogResponseRequest largeLogRequestIPv6WithoutPort =
+      AdminProtos.SlowLogResponseRequest.newBuilder()
+        .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG)
+        .setClientAddress("[127:1:1:1:1:1:1:1]").build();
+    Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000,
+      () -> getSlowLogPayloads(largeLogRequestIPv6WithoutPort).size() == 3));
+    AdminProtos.SlowLogResponseRequest largeLogRequestIPv4WithPort =
+      AdminProtos.SlowLogResponseRequest.newBuilder()
+        .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG)
+        .setClientAddress("127.0.0.1:1").build();
+    Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000,
+      () -> getSlowLogPayloads(largeLogRequestIPv4WithPort).size() == 1));
+    AdminProtos.SlowLogResponseRequest largeLogRequestIPv4WithoutPort =
+      AdminProtos.SlowLogResponseRequest.newBuilder()
+        .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG)
+        .setClientAddress("127.0.0.1").build();
+    Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000,
+      () -> getSlowLogPayloads(largeLogRequestIPv4WithoutPort).size() == 2));
+  }
+
   @Test
   public void testConcurrentSlowLogEvents() throws Exception {
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_largelog_responses.rb b/hbase-shell/src/main/ruby/shell/commands/get_largelog_responses.rb
index 8ed55abfc10a..45257df7f17d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_largelog_responses.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_largelog_responses.rb
@@ -44,6 +44,12 @@ def help
                                             => get largelog responses only related to meta region
   hbase> get_largelog_responses '*', {'TABLE_NAME' => 't1'}
                                             => get largelog responses only related to t1 table
+  hbase> get_largelog_responses '*', {'CLIENT_IP' => '192.162.1.40'}
+                                            => get largelog responses only related to the given
+                                               client IP address
+  hbase> get_largelog_responses '*', {'CLIENT_IP' => '192.162.1.40:60225'}
+                                            => get largelog responses only related to the given
+                                               client IP address and port
   hbase> get_largelog_responses '*', {'CLIENT_IP' => '192.162.1.40:60225', 'LIMIT' => 100}
                                             => get largelog responses with given client IP address
                                                and get 100 records limit
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_slowlog_responses.rb b/hbase-shell/src/main/ruby/shell/commands/get_slowlog_responses.rb
index 2dc108b1d68a..c53bedc81693 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_slowlog_responses.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_slowlog_responses.rb
@@ -44,6 +44,12 @@ def help
                                             => get slowlog responses only related to meta region
   hbase> get_slowlog_responses '*', {'TABLE_NAME' => 't1'}
                                             => get slowlog responses only related to t1 table
+  hbase> get_slowlog_responses '*', {'CLIENT_IP' => '192.162.1.40'}
+                                            => get slowlog responses only related to the given
+                                               client IP address
+  hbase> get_slowlog_responses '*', {'CLIENT_IP' => '192.162.1.40:60225'}
+                                            => get slowlog responses only related to the given
+                                               client IP address and port
   hbase> get_slowlog_responses '*', {'CLIENT_IP' => '192.162.1.40:60225', 'LIMIT' => 100}
                                             => get slowlog responses with given client IP address
                                                and get 100 records limit

From 1780ab6809a1cbf66a29da9c220c0288b694842b Mon Sep 17 00:00:00 2001
From: mokai
Date: Wed, 13 Nov 2024 00:57:13 +0800
Subject: [PATCH 2/3] HBASE-28806 ExportSnapshot failed if reference file
 presented

---
 .../hadoop/hbase/snapshot/ExportSnapshot.java | 11 ++++-
 .../hbase/snapshot/TestExportSnapshot.java    | 43 +++++++++++++++++++
 2 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 186289e517ca..97ef8369cb4e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -28,6 +28,8 @@
 import java.util.Comparator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -659,6 +661,7 @@ private static List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final Configu
 
     // Get snapshot files
     LOG.info("Loading Snapshot '" + snapshotDesc.getName() + "' hfile list");
+    Set<String> existingFiles = new TreeSet<>();
     SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
       new SnapshotReferenceUtil.SnapshotVisitor() {
         @Override
@@ -678,7 +681,13 @@ public void storeFile(final RegionInfo regionInfo, final String family,
             snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family,
               referencedHFile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
           }
-          files.add(snapshotFileAndSize);
+          String fileToExport = snapshotFileAndSize.getFirst().getHfile();
+          if (!existingFiles.contains(fileToExport)) {
+            files.add(snapshotFileAndSize);
+            existingFiles.add(fileToExport);
+          } else {
+            LOG.debug("Skip the existing file: {}.", fileToExport);
+          }
         }
       });
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index 133737bb3972..5685c0657df1 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -106,6 +106,7 @@ public static void setUpBaseConf(Configuration conf) {
     // If a single node has enough failures (default 3), resource manager will blacklist it.
     // With only 2 nodes and tests injecting faults, we don't want that.
conf.setInt("mapreduce.job.maxtaskfailures.per.tracker", 100); + conf.setInt("snapshot.export.default.map.group", 1); } @BeforeClass @@ -206,6 +207,48 @@ public void testExportFileSystemStateWithMergeRegion() throws Exception { TEST_UTIL.deleteTable(tableName0); } + @Test + public void testExportFileSystemStateWithSplitRegion() throws Exception { + // disable compaction + admin.compactionSwitch(false, + admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList())); + // create Table + TableName splitTableName = TableName.valueOf(testName.getMethodName()); + String splitTableSnap = "snapshot-" + testName.getMethodName(); + admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies( + Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build()); + + // put some data + try (Table table = admin.getConnection().getTable(splitTableName)) { + table.put(new Put(Bytes.toBytes("row1")).addColumn(FAMILY, null, Bytes.toBytes("value1"))); + table.put(new Put(Bytes.toBytes("row2")).addColumn(FAMILY, null, Bytes.toBytes("value2"))); + table.put(new Put(Bytes.toBytes("row3")).addColumn(FAMILY, null, Bytes.toBytes("value3"))); + table.put(new Put(Bytes.toBytes("row4")).addColumn(FAMILY, null, Bytes.toBytes("value4"))); + table.put(new Put(Bytes.toBytes("row5")).addColumn(FAMILY, null, Bytes.toBytes("value5"))); + table.put(new Put(Bytes.toBytes("row6")).addColumn(FAMILY, null, Bytes.toBytes("value6"))); + table.put(new Put(Bytes.toBytes("row7")).addColumn(FAMILY, null, Bytes.toBytes("value7"))); + table.put(new Put(Bytes.toBytes("row8")).addColumn(FAMILY, null, Bytes.toBytes("value8"))); + // Flush to HFile + admin.flush(tableName); + } + + List regions = admin.getRegions(splitTableName); + assertEquals(1, regions.size()); + tableNumFiles = regions.size(); + + // split region + admin.split(splitTableName, Bytes.toBytes("row5")); + regions = admin.getRegions(splitTableName); + assertEquals(2, regions.size()); + + // take a snapshot + admin.snapshot(splitTableSnap, splitTableName); + // export snapshot and verify + testExportFileSystemState(splitTableName, splitTableSnap, splitTableSnap, tableNumFiles); + // delete table + TEST_UTIL.deleteTable(splitTableName); + } + @Test public void testExportFileSystemStateWithSkipTmp() throws Exception { TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true); From 3c12f7b08a15cd328e320a150e876658efaa0f0b Mon Sep 17 00:00:00 2001 From: mokai Date: Wed, 13 Nov 2024 01:11:48 +0800 Subject: [PATCH 3/3] Revert "HBASE-28806 ExportSnapshot failed if reference file presented" This reverts commit 1780ab6809a1cbf66a29da9c220c0288b694842b. 
---
 .../hadoop/hbase/snapshot/ExportSnapshot.java | 11 +----
 .../hbase/snapshot/TestExportSnapshot.java    | 43 -------------------
 2 files changed, 1 insertion(+), 53 deletions(-)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 97ef8369cb4e..186289e517ca 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -28,8 +28,6 @@
 import java.util.Comparator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -661,7 +659,6 @@ private static List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final Configu
 
     // Get snapshot files
     LOG.info("Loading Snapshot '" + snapshotDesc.getName() + "' hfile list");
-    Set<String> existingFiles = new TreeSet<>();
     SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
       new SnapshotReferenceUtil.SnapshotVisitor() {
         @Override
@@ -681,13 +678,7 @@ public void storeFile(final RegionInfo regionInfo, final String family,
             snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family,
               referencedHFile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
           }
-          String fileToExport = snapshotFileAndSize.getFirst().getHfile();
-          if (!existingFiles.contains(fileToExport)) {
-            files.add(snapshotFileAndSize);
-            existingFiles.add(fileToExport);
-          } else {
-            LOG.debug("Skip the existing file: {}.", fileToExport);
-          }
+          files.add(snapshotFileAndSize);
         }
       });
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index 5685c0657df1..133737bb3972 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -106,7 +106,6 @@ public static void setUpBaseConf(Configuration conf) {
     // If a single node has enough failures (default 3), resource manager will blacklist it.
     // With only 2 nodes and tests injecting faults, we don't want that.
conf.setInt("mapreduce.job.maxtaskfailures.per.tracker", 100); - conf.setInt("snapshot.export.default.map.group", 1); } @BeforeClass @@ -207,48 +206,6 @@ public void testExportFileSystemStateWithMergeRegion() throws Exception { TEST_UTIL.deleteTable(tableName0); } - @Test - public void testExportFileSystemStateWithSplitRegion() throws Exception { - // disable compaction - admin.compactionSwitch(false, - admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList())); - // create Table - TableName splitTableName = TableName.valueOf(testName.getMethodName()); - String splitTableSnap = "snapshot-" + testName.getMethodName(); - admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies( - Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build()); - - // put some data - try (Table table = admin.getConnection().getTable(splitTableName)) { - table.put(new Put(Bytes.toBytes("row1")).addColumn(FAMILY, null, Bytes.toBytes("value1"))); - table.put(new Put(Bytes.toBytes("row2")).addColumn(FAMILY, null, Bytes.toBytes("value2"))); - table.put(new Put(Bytes.toBytes("row3")).addColumn(FAMILY, null, Bytes.toBytes("value3"))); - table.put(new Put(Bytes.toBytes("row4")).addColumn(FAMILY, null, Bytes.toBytes("value4"))); - table.put(new Put(Bytes.toBytes("row5")).addColumn(FAMILY, null, Bytes.toBytes("value5"))); - table.put(new Put(Bytes.toBytes("row6")).addColumn(FAMILY, null, Bytes.toBytes("value6"))); - table.put(new Put(Bytes.toBytes("row7")).addColumn(FAMILY, null, Bytes.toBytes("value7"))); - table.put(new Put(Bytes.toBytes("row8")).addColumn(FAMILY, null, Bytes.toBytes("value8"))); - // Flush to HFile - admin.flush(tableName); - } - - List regions = admin.getRegions(splitTableName); - assertEquals(1, regions.size()); - tableNumFiles = regions.size(); - - // split region - admin.split(splitTableName, Bytes.toBytes("row5")); - regions = admin.getRegions(splitTableName); - assertEquals(2, regions.size()); - - // take a snapshot - admin.snapshot(splitTableSnap, splitTableName); - // export snapshot and verify - testExportFileSystemState(splitTableName, splitTableSnap, splitTableSnap, tableNumFiles); - // delete table - TEST_UTIL.deleteTable(splitTableName); - } - @Test public void testExportFileSystemStateWithSkipTmp() throws Exception { TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);