diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index bdb96ef3c14a..558d87ba77fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -291,7 +291,28 @@ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOExce * @throws IOException if a remote or network exception occurs */ default void deleteTable(TableName tableName) throws IOException { - get(deleteTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); + get(deleteTableAsync(tableName, true), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); + } + + /** + * Deletes a table. Synchronous operation. + * @param tableName name of table to delete + * @param archive whether to archive the table data when deleting + * @throws IOException if a remote or network exception occurs + */ + default void deleteTable(TableName tableName, boolean archive) throws IOException { + get(deleteTableAsync(tableName, archive), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); + } + + /** + * Deletes a table, archiving its data. Kept for backward compatibility. + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. + */ + default Future deleteTableAsync(TableName tableName) throws IOException { + return deleteTableAsync(tableName, true); } /** @@ -300,11 +321,12 @@ default void deleteTable(TableName tableName) throws IOException { * ExecutionException if there was an error while executing the operation or TimeoutException in * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete + * @param archive whether to archive the table data when deleting * @throws IOException if a remote or network exception occurs * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the * operation to complete. */ - Future deleteTableAsync(TableName tableName) throws IOException; + Future deleteTableAsync(TableName tableName, boolean archive) throws IOException; /** * Truncate a table. Synchronous operation. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 9e2b990d91c1..6957ffb176f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -184,6 +184,11 @@ public Future deleteTableAsync(TableName tableName) throws IOException { return admin.deleteTable(tableName); } + @Override + public Future deleteTableAsync(TableName tableName, boolean archive) throws IOException { + return admin.deleteTable(tableName, archive); + } + @Override public Future truncateTableAsync(TableName tableName, boolean preserveSplits) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 18a449e40083..adc69ff2959d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -195,7 +195,16 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ * Deletes a table.
* @param tableName name of table to delete */ - CompletableFuture deleteTable(TableName tableName); + default CompletableFuture deleteTable(TableName tableName) { + return deleteTable(tableName, true); + } + + /** + * Deletes a table. + * @param tableName name of table to delete + * @param archive whether to archive the table data when deleting + */ + CompletableFuture deleteTable(TableName tableName, boolean archive); /** * Truncate a table. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index ba016111cd3d..c3c2782db47b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -147,13 +147,13 @@ public CompletableFuture modifyTable(TableDescriptor desc) { } @Override - public CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT) { - return wrap(rawAdmin.modifyTableStoreFileTracker(tableName, dstSFT)); + public CompletableFuture deleteTable(TableName tableName, boolean archive) { + return wrap(rawAdmin.deleteTable(tableName, archive)); } @Override - public CompletableFuture deleteTable(TableName tableName) { - return wrap(rawAdmin.deleteTable(tableName)); + public CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT) { + return wrap(rawAdmin.modifyTableStoreFileTracker(tableName, dstSFT)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 4d614907326e..57dcefb20f5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -672,9 +672,10 @@ ModifyTableStoreFileTrackerResponse> procedureCall(tableName, } @Override - public CompletableFuture deleteTable(TableName tableName) { + public CompletableFuture deleteTable(TableName tableName, boolean archive) { return this.
procedureCall(tableName, - RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()), + RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce(), + archive), (s, c, req, done) -> s.deleteTable(c, req, done), (resp) -> resp.getProcId(), new DeleteTableProcedureBiConsumer(tableName)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index b2ddc4eaec15..2fc5fa3c1152 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -125,7 +125,13 @@ public static ReplicationProtos.TableCF[] convert(String tableCFsConfig) { } // 2 split to "table" and "cf1,cf2" // for each table: "table#cf1,cf2" or "table" - Iterator i = Splitter.on(':').split(tab).iterator(); + List pair = Splitter.on(':').splitToList(tab); + if (pair.size() > 2) { + LOG.info("incorrect format:" + tableCFsConfig); + continue; + } + assert pair.size() > 0; + Iterator i = pair.iterator(); String tabName = i.next().trim(); if (tabName.length() == 0) { LOG.info("incorrect format:" + tableCFsConfig); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 13e85f91c958..9cfcf7b73ed5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -1018,11 +1018,12 @@ public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] region * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest */ public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, - final long nonceGroup, final long nonce) { + final long nonceGroup, final long nonce, final boolean archive) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); + builder.setArchive(archive); return builder.build(); } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 257abe8f11ca..c38b967516f4 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -152,6 +152,7 @@ message DeleteTableRequest { required TableName table_name = 1; optional uint64 nonce_group = 2 [default = 0]; optional uint64 nonce = 3 [default = 0]; + optional bool archive = 4 [default = true]; } message DeleteTableResponse { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 816e4497f1f2..221f508349e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2431,11 +2431,35 @@ private static boolean isCatalogTable(final TableName tableName) { return tableName.equals(TableName.META_TABLE_NAME); } + private void checkSnapshot(TableName tableName, 
boolean archive) throws IOException { + /* + * If the table is to be deleted without archiving, make sure it has no snapshots. Note that + * this check lists and opens the completed snapshots, so with many snapshots the cost of the + * scan should be weighed against simply archiving the data. TODO: find a cheaper way to tell + * whether the table has any snapshots. + */ + if (!archive) { + LOG.debug( + "Scanning snapshots to check whether a snapshot exists for " + tableName.getNameAsString()); + + // List all the completed snapshots + List snapshotList = snapshotManager.getCompletedSnapshots(); + List tableList = snapshotList.stream().map(n -> n.getTable()) + .filter(c -> c.equals(tableName.getNameAsString())).collect(Collectors.toList()); + if (!tableList.isEmpty() || snapshotManager.isTakingSnapshot(tableName)) { + throw new DoNotRetryIOException("There is snapshot for the table and archive is needed"); + } + } + } + @Override - public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException { + public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce, + final boolean archive) throws IOException { checkInitialized(); + // When archive is disabled, make sure the table has no snapshots before deleting it + checkSnapshot(tableName, archive); + return MasterProcedureUtil .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { @Override @@ -2449,8 +2473,8 @@ protected void run() throws IOException { // We need to wait for the procedure to potentially fail due to "prepare" sanity // checks. This will block only the beginning of the procedure. See HBASE-19953. ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure( - new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); + submitProcedure(new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, + latch, archive)); latch.await(); getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index e74505fa7fb9..bbd231f663d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -829,7 +829,7 @@ public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequ throws ServiceException { try { long procId = server.deleteTable(ProtobufUtil.toTableName(request.getTableName()), - request.getNonceGroup(), request.getNonce()); + request.getNonceGroup(), request.getNonce(), request.getArchive()); return DeleteTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index fbeb155a88ba..69cfcd74eba8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -138,11 +138,20 @@ long createTable(final TableDescriptor desc, final byte[][] splitKeys, final lon long createSystemTable(final TableDescriptor tableDescriptor) throws IOException; /** - * Delete a table +
* Delete a table. Kept for backward-compatible unit tests. * @param tableName The table name nnn */ - long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) - throws IOException; + default long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) + throws IOException { + return deleteTable(tableName, nonceGroup, nonce, true); + } + + /** + * Delete a table + * @param tableName The table name nnnn + */ + long deleteTable(final TableName tableName, final long nonceGroup, final long nonce, + final boolean archive) throws IOException; /** * Truncate a table diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 80fb5d0534d4..2a687c3b93a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -61,6 +61,7 @@ public class DeleteTableProcedure extends AbstractStateMachineTableProcedure regions; private TableName tableName; + private boolean archive; public DeleteTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -73,8 +74,15 @@ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableN public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName, final ProcedurePrepareLatch syncLatch) { + this(env, tableName, syncLatch, true); + } + + public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName, + final ProcedurePrepareLatch syncLatch, final boolean archive) { + super(env, syncLatch); this.tableName = tableName; + this.archive = archive; } @Override @@ -108,7 +116,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s break; case DELETE_TABLE_CLEAR_FS_LAYOUT: LOG.debug("Deleting regions from filesystem for {}", this); - DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true); + DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, archive); setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META); break; case DELETE_TABLE_REMOVE_FROM_META: @@ -298,7 +306,7 @@ protected static void deleteFromFs(final MasterProcedureEnv env, final TableName Path mobTableDir = CommonFSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName); Path regionDir = new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName()); - if (fs.exists(regionDir)) { + if (fs.exists(regionDir) && archive) { HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 1598e6fbbb3b..67a8b24be603 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -1575,6 +1575,20 @@ public void deleteTable(TableName tableName) throws IOException { getAdmin().deleteTable(tableName); } + /** + * Drop an existing table + * @param tableName existing table + * @param archive whether to archive the data when deleting the table + */ + public void deleteTable(TableName tableName, boolean archive) throws IOException { + try { + getAdmin().disableTable(tableName); + } catch (TableNotEnabledException e) { +
LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); + } + getAdmin().deleteTable(tableName, archive); + } + /** * Drop an existing table * @param tableName existing table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index bc3969ffd518..ee58adb3a796 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -247,6 +247,12 @@ public long deleteTable(final TableName tableName, final long nonceGroup, final return -1; } + @Override + public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce, + final boolean archive) throws IOException { + return -1; + } + @Override public long truncateTable(final TableName tableName, final boolean preserveSplits, final long nonceGroup, final long nonce) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteTable.java new file mode 100644 index 000000000000..de3d842cbbee --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteTable.java @@ -0,0 +1,333 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.Random; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler; +import org.apache.hadoop.hbase.mob.MobUtils; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.mockito.Mockito; + +@Category(MediumTests.class) +public class TestDeleteTable { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestDeleteTable.class); + + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + private final static byte[] FAMILY1 = Bytes.toBytes("mob"); + private final static byte[] FAMILY2 = Bytes.toBytes("normal"); + + private final static byte[] QF = Bytes.toBytes("qualifier"); + private static Random random = new Random(); + + @Rule + public TestName name = new TestName(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + private static byte[] generateValue(int size) { + byte[] val = new byte[size]; + random.nextBytes(val); + return val; + } + + private TableDescriptor createTableDescriptor(TableName tableName) { + ColumnFamilyDescriptorBuilder builder1 = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY1); + ColumnFamilyDescriptorBuilder builder2 = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY2); + + builder1.setMobEnabled(true); + builder1.setMobThreshold(0); + + return TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(builder1.build()) + .setColumnFamily(builder2.build()).build(); + } + + private Table createTableWithFiles(TableDescriptor htd) throws IOException { + Table table = TEST_UTIL.createTable(htd, null); + try { + // insert data + byte[] value = generateValue(10); + byte[] row = Bytes.toBytes("row"); + Put put1 = new Put(row); + put1.addColumn(FAMILY1, QF, EnvironmentEdgeManager.currentTime(), value); + table.put(put1); + + Put put2 = new Put(row); + put2.addColumn(FAMILY2, QF, EnvironmentEdgeManager.currentTime(), value); + table.put(put2); + + // create an hfile + TEST_UTIL.getAdmin().flush(htd.getTableName()); + } catch (IOException e) { + table.close(); + throw e; + } + return table; + } + + @Test + public void 
testDeleteTableWithoutArchive() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor htd = createTableDescriptor(tableName); + ColumnFamilyDescriptor hcd_mob = htd.getColumnFamily(FAMILY1); + ColumnFamilyDescriptor hcd = htd.getColumnFamily(FAMILY2); + + Table table = createTableWithFiles(htd); + + RegionInfo regionInfo = TEST_UTIL.getAdmin().getRegions(tableName).get(0); + + // the mob file exists + Assert.assertEquals(1, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertTrue(mobTableDirExist(tableName)); + Assert.assertTrue(tableDirExist(tableName)); + + table.close(); + TEST_UTIL.deleteTable(tableName, false); + + Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName)); + Assert.assertEquals(0, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertFalse(mobTableDirExist(tableName)); + Assert.assertFalse(tableDirExist(tableName)); + } + + @Test + public void testDeleteTableWithArchive() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor htd = createTableDescriptor(tableName); + ColumnFamilyDescriptor hcd_mob = htd.getColumnFamily(FAMILY1); + ColumnFamilyDescriptor hcd = htd.getColumnFamily(FAMILY2); + + Table table = createTableWithFiles(htd); + + RegionInfo regionInfo = TEST_UTIL.getAdmin().getRegions(tableName).get(0); + + // the mob file exists + Assert.assertEquals(1, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertTrue(mobTableDirExist(tableName)); + Assert.assertTrue(tableDirExist(tableName)); + + table.close(); + TEST_UTIL.deleteTable(tableName, true); + + Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName)); + Assert.assertEquals(0, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(1, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertFalse(mobTableDirExist(tableName)); + Assert.assertFalse(tableDirExist(tableName)); + } + + @Test + public void testDeleteTableDefaultBehavior() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor htd = createTableDescriptor(tableName); + ColumnFamilyDescriptor hcd_mob = htd.getColumnFamily(FAMILY1); + ColumnFamilyDescriptor hcd = htd.getColumnFamily(FAMILY2); + + Table table = createTableWithFiles(htd); + + RegionInfo regionInfo = TEST_UTIL.getAdmin().getRegions(tableName).get(0); + + // the mob file exists + Assert.assertEquals(1, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, 
countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(0, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertTrue(mobTableDirExist(tableName)); + Assert.assertTrue(tableDirExist(tableName)); + + table.close(); + TEST_UTIL.deleteTable(tableName); + + Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName)); + Assert.assertEquals(0, countMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(1, countArchiveMobFiles(tableName, hcd_mob.getNameAsString())); + Assert.assertEquals(0, countRegionFiles(tableName, hcd.getNameAsString())); + Assert.assertEquals(1, countArchiveRegionFiles(tableName, hcd.getNameAsString(), regionInfo)); + Assert.assertFalse(mobTableDirExist(tableName)); + Assert.assertFalse(tableDirExist(tableName)); + } + + @Test + public void testDeleteTableWithSnapshot() throws Exception { + + // no snapshot + final TableName tn_no_snapshot = TableName.valueOf(name.getMethodName()); + // snapshot in progress + final TableName tn_snapshot_inprogress = + TableName.valueOf(name.getMethodName() + "_snapshot_inprogress"); + // complete snapshot + final TableName tn_snapshot = TableName.valueOf(name.getMethodName() + "_snapshot"); + + TableDescriptor htd_no_snapshot = createTableDescriptor(tn_no_snapshot); + TableDescriptor htd_snapshot_inprogress = createTableDescriptor(tn_snapshot_inprogress); + TableDescriptor htd_snapshot = createTableDescriptor(tn_snapshot); + + Table tb_no_snapshot = createTableWithFiles(htd_no_snapshot); + Table tb_snapshot_inprogress = createTableWithFiles(htd_snapshot_inprogress); + Table tb_snapshot = createTableWithFiles(htd_snapshot); + + String snapshotName = name.getMethodName() + "-snapshot"; + TEST_UTIL.getAdmin().snapshot(snapshotName, tn_snapshot); + + // deleting a table that has a completed snapshot without archiving should fail + Exception exception_with_snapshot = + Assert.assertThrows(DoNotRetryIOException.class, () -> { + TEST_UTIL.deleteTable(tn_snapshot, false); + }); + Assert.assertTrue(exception_with_snapshot.getMessage() + .contains("There is snapshot for the table and archive is needed")); + + // register a mock handler for the table to simulate a snapshot in progress + DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class); + TEST_UTIL.getMiniHBaseCluster().getMaster().getSnapshotManager() + .setSnapshotHandlerForTesting(tn_snapshot_inprogress, mockHandler); + + // deleting a table with a snapshot in progress without archiving should also fail + Exception exception_snapshot_inprogress = Assert.assertThrows(DoNotRetryIOException.class, () -> { + TEST_UTIL.deleteTable(tn_snapshot_inprogress, false); + }); + Assert.assertTrue(exception_snapshot_inprogress.getMessage() + .contains("There is snapshot for the table and archive is needed")); + + // with the appropriate archive flag, all three deletes succeed + TEST_UTIL.deleteTable(tn_no_snapshot, false); + TEST_UTIL.deleteTable(tn_snapshot_inprogress, true); + TEST_UTIL.deleteTable(tn_snapshot, true); + + tb_no_snapshot.close(); + tb_snapshot_inprogress.close(); + tb_snapshot.close(); + + } + + private int countMobFiles(TableName tn, String familyName) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path mobFileDir = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName); + if (fs.exists(mobFileDir)) { + return fs.listStatus(mobFileDir).length; + } + return 0; + } + + private int
countArchiveMobFiles(TableName tn, String familyName) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn, + MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName); + if (fs.exists(storePath)) { + return fs.listStatus(storePath).length; + } + return 0; + } + + private boolean mobTableDirExist(TableName tn) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path tableDir = + CommonFSUtils.getTableDir(MobUtils.getMobHome(TEST_UTIL.getConfiguration()), tn); + return fs.exists(tableDir); + } + + private int countRegionFiles(TableName tn, String familyName) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + + if (TEST_UTIL.getAdmin().getRegions(tn).isEmpty()) { + return 0; + } + + RegionInfo regionInfo = TEST_UTIL.getAdmin().getRegions(tn).get(0); + + Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, regionInfo); + Path nfDir = new Path(regionDir, familyName); + + if (fs.exists(nfDir)) { + return fs.listStatus(nfDir).length; + } + return 0; + } + + private int countArchiveRegionFiles(TableName tn, String familyName, RegionInfo regionInfo) + throws IOException { + + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn, + regionInfo.getEncodedName(), familyName); + + if (fs.exists(storePath)) { + return fs.listStatus(storePath).length; + } + return 0; + } + + private boolean tableDirExist(TableName tn) throws IOException { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + Path tableDir = CommonFSUtils.getTableDir(rootDir, tn); + return fs.exists(tableDir); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 3c0658455f3a..0f99991469c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -175,6 +175,11 @@ public Future deleteTableAsync(TableName tableName) throws IOException { return admin.deleteTableAsync(tableName); } + @Override + public Future deleteTableAsync(TableName tableName, boolean archive) throws IOException { + return admin.deleteTableAsync(tableName, archive); + } + public Future truncateTableAsync(TableName tableName, boolean preserveSplits) throws IOException { return admin.truncateTableAsync(tableName, preserveSplits); diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index e90e37c4bc63..d0d95f0fa327 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -443,13 +443,13 @@ def disabled?(table_name) #---------------------------------------------------------------------------------------------- # Drops a table - def drop(table_name) + def drop(table_name, archive=true) tableExists(table_name) raise ArgumentError, "Table #{table_name} is enabled. Disable it first." 
if enabled?( table_name ) - @admin.deleteTable(org.apache.hadoop.hbase.TableName.valueOf(table_name)) + @admin.deleteTable(org.apache.hadoop.hbase.TableName.valueOf(table_name), archive) end #---------------------------------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/shell/commands/drop.rb b/hbase-shell/src/main/ruby/shell/commands/drop.rb index 063eb32773e8..9fed0ef2556a 100644 --- a/hbase-shell/src/main/ruby/shell/commands/drop.rb +++ b/hbase-shell/src/main/ruby/shell/commands/drop.rb @@ -22,14 +22,17 @@ module Commands class Drop < Command def help <<-EOF -Drop the named table. Table must first be disabled: +Drop the named table. Table must first be disabled: hbase> drop 't1' hbase> drop 'ns1:t1' +Archiving is enabled by default; you can switch it off by passing false: + hbase> drop 't1', false + hbase> drop 'ns1:t1', false EOF end - def command(table) - admin.drop(table) + def command(table, archive=true) + admin.drop(table, archive) end end end diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index d3f5a51dc4ae..216f487457a4 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1810,6 +1810,20 @@ public void deleteTable(TableName tableName) throws IOException { getAdmin().deleteTable(tableName); } + /** + * Drop an existing table + * @param tableName existing table + * @param archive whether to archive the table data when deleting + */ + public void deleteTable(TableName tableName, boolean archive) throws IOException { + try { + getAdmin().disableTable(tableName); + } catch (TableNotEnabledException e) { + LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); + } + getAdmin().deleteTable(tableName, archive); + } + /** * Drop an existing table * @param tableName existing table diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 13a1b9920ecf..0a382d128e24 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1099,6 +1099,11 @@ public Future deleteTableAsync(TableName tableName) { throw new NotImplementedException("deleteTableAsync not supported in ThriftAdmin"); } + @Override + public Future deleteTableAsync(TableName tableName, boolean archive) { + throw new NotImplementedException("deleteTableAsync not supported in ThriftAdmin"); + } + @Override public Future truncateTableAsync(TableName tableName, boolean preserveSplits) { throw new NotImplementedException("truncateTableAsync not supported in ThriftAdmin");
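Usage sketch (not part of the patch): a minimal client-side example of the new archive flag exposed through Admin#deleteTable(TableName, boolean). The class name, the table name "demo", and the connection boilerplate are assumptions for illustration; only the boolean overload and the DoNotRetryIOException thrown by the master when archive=false is requested for a table that still has a snapshot come from this patch.

// Minimal sketch, assuming a running cluster and an existing table named "demo".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteWithoutArchiveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("demo");
      admin.disableTable(tn);
      try {
        // New overload from this patch: archive=false removes region and MOB files
        // instead of moving them under the archive directory.
        admin.deleteTable(tn, false);
      } catch (DoNotRetryIOException e) {
        // The master rejects archive=false while the table has a snapshot (completed or
        // in progress); fall back to the default behavior, which archives the data.
        admin.deleteTable(tn, true);
      }
    }
  }
}

The shell equivalent added in drop.rb follows the same default: drop 't1' archives, while drop 't1', false skips the archive step.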