From f00066af9419ace5468648b4a8c7fe195289f026 Mon Sep 17 00:00:00 2001 From: gvprathyusha6 <70918688+gvprathyusha6@users.noreply.github.com> Date: Sat, 2 Nov 2024 04:35:30 +0530 Subject: [PATCH] HBASE-28564 Refactor direct interactions of Reference file creations to SFT interface (#5939) Signed-off-by: Andrew Purtell Conflicts: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java --- .../org/apache/hadoop/hbase/io/HFileLink.java | 2 +- .../org/apache/hadoop/hbase/io/Reference.java | 2 +- .../hadoop/hbase/io/hfile/CacheConfig.java | 1 - .../MergeTableRegionsProcedure.java | 2 +- .../assignment/SplitTableRegionProcedure.java | 37 ++-- .../hbase/master/janitor/CatalogJanitor.java | 15 +- .../hadoop/hbase/mob/CachedMobFile.java | 5 +- .../hbase/mob/ExpiredMobFileCleaner.java | 8 +- .../org/apache/hadoop/hbase/mob/MobFile.java | 7 +- .../apache/hadoop/hbase/mob/MobFileCache.java | 11 +- .../hadoop/hbase/mob/MobFileCleanerChore.java | 2 +- .../hadoop/hbase/mob/MobFileCleanupUtil.java | 24 +-- .../org/apache/hadoop/hbase/mob/MobUtils.java | 15 +- .../regionserver/DataTieringManager.java | 2 +- .../hadoop/hbase/regionserver/HMobStore.java | 7 +- .../hadoop/hbase/regionserver/HRegion.java | 25 ++- .../hbase/regionserver/HRegionFileSystem.java | 84 ++------ .../hadoop/hbase/regionserver/HStore.java | 10 +- .../hadoop/hbase/regionserver/HStoreFile.java | 5 +- .../hbase/regionserver/StoreEngine.java | 7 +- .../hbase/regionserver/StoreFileInfo.java | 63 +++--- .../DefaultStoreFileTracker.java | 50 ++++- .../FileBasedStoreFileTracker.java | 19 +- .../storefiletracker/StoreFileTracker.java | 25 +++ .../StoreFileTrackerBase.java | 128 ++++++++++++ .../StoreFileTrackerFactory.java | 8 +- .../hbase/snapshot/RestoreSnapshotHelper.java | 59 +++--- .../hbase/snapshot/SnapshotManifest.java | 20 +- .../hbase/snapshot/SnapshotManifestV1.java | 43 +++- .../hbase/util/ServerRegionReplicaUtil.java | 9 +- .../compaction/MajorCompactionRequest.java | 15 +- .../hbase-webapps/regionserver/storeFile.jsp | 2 +- .../client/TestTableSnapshotScanner.java | 18 +- .../hbase/io/TestHalfStoreFileReader.java | 38 +++- .../hbase/io/hfile/TestBytesReadFromFs.java | 2 +- .../hadoop/hbase/io/hfile/TestPrefetch.java | 24 ++- .../io/hfile/TestPrefetchWithBucketCache.java | 13 +- .../master/janitor/TestCatalogJanitor.java | 48 ++++- .../hadoop/hbase/mob/TestCachedMobFile.java | 23 ++- .../hbase/mob/TestExpiredMobFileCleaner.java | 7 + .../apache/hadoop/hbase/mob/TestMobFile.java | 11 +- .../hadoop/hbase/mob/TestMobFileCache.java | 63 +++--- .../hbase/mob/TestMobStoreCompaction.java | 13 +- ...bstractTestDateTieredCompactionPolicy.java | 7 +- .../regionserver/DataBlockEncodingTool.java | 3 +- .../EncodedSeekPerformanceTest.java | 10 +- .../hbase/regionserver/MockHStoreFile.java | 19 +- .../TestCacheOnWriteInSchema.java | 3 +- 
.../TestCompactionArchiveIOException.java | 6 +- .../regionserver/TestCompactionPolicy.java | 7 +- .../regionserver/TestCompoundBloomFilter.java | 3 +- .../TestCustomCellDataTieringManager.java | 7 +- .../TestCustomCellTieredCompactionPolicy.java | 10 +- .../regionserver/TestDataTieringManager.java | 8 +- .../TestDirectStoreSplitsMerges.java | 45 ++-- .../regionserver/TestFSErrorsExposed.java | 10 +- .../hbase/regionserver/TestHRegion.java | 12 +- .../regionserver/TestHRegionFileSystem.java | 17 +- .../hadoop/hbase/regionserver/TestHStore.java | 8 +- .../hbase/regionserver/TestHStoreFile.java | 194 ++++++++++++------ .../TestMergesSplitsAddToTracker.java | 35 +++- .../TestRegionMergeTransactionOnCluster.java | 14 +- .../regionserver/TestReversibleScanners.java | 25 ++- .../TestRowPrefixBloomFilter.java | 6 +- .../TestSplitTransactionOnCluster.java | 13 +- .../hbase/regionserver/TestStoreFileInfo.java | 35 ++-- .../TestStoreFileRefresherChore.java | 32 +-- ...estStoreFileScannerWithTagCompression.java | 2 +- .../regionserver/TestStoreScannerClosure.java | 10 +- .../TestStripeStoreFileManager.java | 5 +- .../FailingStoreFileTrackerForTest.java | 42 ++++ .../StoreFileTrackerForTest.java | 7 + .../snapshot/TestSnapshotStoreFileSize.java | 9 +- .../TestMajorCompactionRequest.java | 19 +- .../TestMajorCompactionTTLRequest.java | 4 + 75 files changed, 1133 insertions(+), 466 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FailingStoreFileTrackerForTest.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index a036a90d7cf7..dc7ac7338acc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -463,7 +463,7 @@ public static String createFromHFileLink(final Configuration conf, final FileSys * Create the back reference name */ // package-private for testing - static String createBackReferenceName(final String tableNameStr, final String regionName) { + public static String createBackReferenceName(final String tableNameStr, final String regionName) { return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '='); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java index 337fde60cf7d..22d3c9ce2c0b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -195,7 +195,7 @@ public static Reference convert(final FSProtos.Reference r) { * delimiter, pb reads to EOF which may not be what you want). * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. 
*/ - byte[] toByteArray() throws IOException { + public byte[] toByteArray() throws IOException { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index ae196340db61..e65bfc34073f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -162,7 +162,6 @@ public class CacheConfig implements PropagatingConfigurationObserver { private final ByteBuffAllocator byteBuffAllocator; - /** * Create a cache configuration using the specified configuration object and defaults for family * level settings. Only use if no column family context. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index 8203a3458372..4c42229e3e6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -644,7 +644,7 @@ private List mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem reg // to read the hfiles. storeFileInfo.setConf(storeConfiguration); Path refFile = mergeRegionFs.mergeStoreFile(regionFs.getRegionInfo(), family, - new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED)); + new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED), tracker); mergedFiles.add(refFile); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 3250680d57bb..3e43079003fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -701,8 +701,9 @@ private Pair, List> splitStoreFiles(final MasterProcedureEnv en // table dir. In case of failure, the proc would go through this again, already existing // region dirs and split files would just be ignored, new split files should get created. int nbFiles = 0; - final Map> files = - new HashMap>(htd.getColumnFamilyCount()); + final Map, StoreFileTracker>> files = + new HashMap, StoreFileTracker>>( + htd.getColumnFamilyCount()); for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) { String family = cfd.getNameAsString(); StoreFileTracker tracker = @@ -725,7 +726,7 @@ private Pair, List> splitStoreFiles(final MasterProcedureEnv en } if (filteredSfis == null) { filteredSfis = new ArrayList(sfis.size()); - files.put(family, filteredSfis); + files.put(family, new Pair(filteredSfis, tracker)); } filteredSfis.add(sfi); nbFiles++; @@ -748,10 +749,12 @@ private Pair, List> splitStoreFiles(final MasterProcedureEnv en final List>> futures = new ArrayList>>(nbFiles); // Split each store file. 
- for (Map.Entry> e : files.entrySet()) { + for (Map.Entry, StoreFileTracker>> e : files + .entrySet()) { byte[] familyName = Bytes.toBytes(e.getKey()); final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName); - final Collection storeFiles = e.getValue(); + Pair, StoreFileTracker> storeFilesAndTracker = e.getValue(); + final Collection storeFiles = storeFilesAndTracker.getFirst(); if (storeFiles != null && storeFiles.size() > 0) { final Configuration storeConfiguration = StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd); @@ -762,8 +765,9 @@ private Pair, List> splitStoreFiles(final MasterProcedureEnv en // is running in a regionserver's Store context, or we might not be able // to read the hfiles. storeFileInfo.setConf(storeConfiguration); - StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, - new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED)); + StoreFileSplitter sfs = + new StoreFileSplitter(regionFs, storeFilesAndTracker.getSecond(), familyName, + new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED)); futures.add(threadPool.submit(sfs)); } } @@ -829,8 +833,8 @@ private void assertSplitResultFilesCount(final FileSystem fs, } } - private Pair splitStoreFile(HRegionFileSystem regionFs, byte[] family, HStoreFile sf) - throws IOException { + private Pair splitStoreFile(HRegionFileSystem regionFs, StoreFileTracker tracker, + byte[] family, HStoreFile sf) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("pid=" + getProcId() + " splitting started for store file: " + sf.getPath() + " for region: " + getParentRegion().getShortNameToLog()); @@ -838,10 +842,10 @@ private Pair splitStoreFile(HRegionFileSystem regionFs, byte[] famil final byte[] splitRow = getSplitRow(); final String familyName = Bytes.toString(family); - final Path path_first = - regionFs.splitStoreFile(this.daughterOneRI, familyName, sf, splitRow, false, splitPolicy); - final Path path_second = - regionFs.splitStoreFile(this.daughterTwoRI, familyName, sf, splitRow, true, splitPolicy); + final Path path_first = regionFs.splitStoreFile(this.daughterOneRI, familyName, sf, splitRow, + false, splitPolicy, tracker); + final Path path_second = regionFs.splitStoreFile(this.daughterTwoRI, familyName, sf, splitRow, + true, splitPolicy, tracker); if (LOG.isDebugEnabled()) { LOG.debug("pid=" + getProcId() + " splitting complete for store file: " + sf.getPath() + " for region: " + getParentRegion().getShortNameToLog()); @@ -857,6 +861,7 @@ private class StoreFileSplitter implements Callable> { private final HRegionFileSystem regionFs; private final byte[] family; private final HStoreFile sf; + private final StoreFileTracker tracker; /** * Constructor that takes what it needs to split @@ -864,15 +869,17 @@ private class StoreFileSplitter implements Callable> { * @param family Family that contains the store file * @param sf which file */ - public StoreFileSplitter(HRegionFileSystem regionFs, byte[] family, HStoreFile sf) { + public StoreFileSplitter(HRegionFileSystem regionFs, StoreFileTracker tracker, byte[] family, + HStoreFile sf) { this.regionFs = regionFs; this.sf = sf; this.family = family; + this.tracker = tracker; } @Override public Pair call() throws IOException { - return splitStoreFile(regionFs, family, sf); + return splitStoreFile(regionFs, tracker, family, sf); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java index 8b482b3ae019..71d1d1ced63f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java @@ -34,6 +34,8 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; @@ -49,6 +51,8 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; @@ -421,7 +425,16 @@ private static Pair checkRegionReferences(MasterServices servi try { HRegionFileSystem regionFs = HRegionFileSystem .openRegionFromFileSystem(services.getConfiguration(), fs, tabledir, region, true); - boolean references = regionFs.hasReferences(tableDescriptor); + ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); + boolean references = false; + for (ColumnFamilyDescriptor cfd : families) { + StoreFileTracker sft = StoreFileTrackerFactory.create(services.getConfiguration(), + tableDescriptor, ColumnFamilyDescriptorBuilder.of(cfd.getNameAsString()), regionFs); + references = references || sft.hasReferences(); + if (references) { + break; + } + } return new Pair<>(Boolean.TRUE, references); } catch (IOException e) { LOG.error("Error trying to determine if region {} has references, assuming it does", diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java index 1c1145a2f482..cdf941878119 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.yetus.audience.InterfaceAudience; /** @@ -41,10 +42,10 @@ public CachedMobFile(HStoreFile sf) { } public static CachedMobFile create(FileSystem fs, Path path, Configuration conf, - CacheConfig cacheConf) throws IOException { + CacheConfig cacheConf, StoreFileTracker sft) throws IOException { // XXX: primaryReplica is only used for constructing the key of block cache so it is not a // critical problem if we pass the wrong value, so here we always pass true. Need to fix later. 
- HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true); + HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true, sft); return new CachedMobFile(sf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java index 3c02d483c0b6..9ecda5ec8cb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java @@ -57,17 +57,17 @@ public class ExpiredMobFileCleaner extends Configured implements Tool { * @param tableName The current table name. * @param family The current family. */ - public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family) + public void cleanExpiredMobFiles(TableDescriptor htd, ColumnFamilyDescriptor family) throws IOException { Configuration conf = getConf(); - TableName tn = TableName.valueOf(tableName); + String tableName = htd.getTableName().getNameAsString(); FileSystem fs = FileSystem.get(conf); LOG.info("Cleaning the expired MOB files of " + family.getNameAsString() + " in " + tableName); // disable the block cache. Configuration copyOfConf = new Configuration(conf); copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f); CacheConfig cacheConfig = new CacheConfig(copyOfConf); - MobUtils.cleanExpiredMobFiles(fs, conf, tn, family, cacheConfig, + MobUtils.cleanExpiredMobFiles(fs, conf, htd, family, cacheConfig, EnvironmentEdgeManager.currentTime()); } @@ -107,7 +107,7 @@ public int run(String[] args) throws Exception { throw new IOException( "The minVersions of the column family is not 0, could not be handled by this cleaner"); } - cleanExpiredMobFiles(tableName, family); + cleanExpiredMobFiles(htd, family); return 0; } finally { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java index 3293208771ac..de7f61032ed5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.yetus.audience.InterfaceAudience; /** @@ -133,11 +134,11 @@ public void close() throws IOException { * @param cacheConf The CacheConfig. * @return An instance of the MobFile. */ - public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf) - throws IOException { + public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf, + StoreFileTracker sft) throws IOException { // XXX: primaryReplica is only used for constructing the key of block cache so it is not a // critical problem if we pass the wrong value, so here we always pass true. Need to fix later. 
- HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true); + HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true, sft); return new MobFile(sf); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java index b353b53ffb71..45ec006f97f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java @@ -33,6 +33,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.regionserver.StoreContext; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.util.IdLock; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -198,9 +201,11 @@ public void evictFile(String fileName) { * @param cacheConf The current MobCacheConfig * @return A opened mob file. */ - public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException { + public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf, + StoreContext storeContext) throws IOException { + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, storeContext); if (!isCacheEnabled) { - MobFile mobFile = MobFile.create(fs, path, conf, cacheConf); + MobFile mobFile = MobFile.create(fs, path, conf, cacheConf, sft); mobFile.open(); return mobFile; } else { @@ -214,7 +219,7 @@ public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws if (map.size() > mobFileMaxCacheSize) { evict(); } - cached = CachedMobFile.create(fs, path, conf, cacheConf); + cached = CachedMobFile.create(fs, path, conf, cacheConf, sft); cached.open(); map.put(fileName, cached); miss.increment(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java index c4bada278df6..9ce20e7c650e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java @@ -92,7 +92,7 @@ protected void chore() { for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) { if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) { try { - cleaner.cleanExpiredMobFiles(htd.getTableName().getNameAsString(), hcd); + cleaner.cleanExpiredMobFiles(htd, hcd); } catch (IOException e) { LOG.error("Failed to clean the expired mob files table={} family={}", htd.getTableName().getNameAsString(), hcd.getNameAsString(), e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java index 049192624ef3..a1b2d4a792e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanupUtil.java @@ -35,7 +35,11 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HStoreFile; +import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -90,7 +94,10 @@ public static void cleanupObsoleteMobFiles(Configuration conf, TableName table, Set allActiveMobFileName = new HashSet(); for (Path regionPath : regionDirs) { regionNames.add(regionPath.getName()); + HRegionFileSystem regionFS = + HRegionFileSystem.create(conf, fs, tableDir, MobUtils.getMobRegionInfo(table)); for (ColumnFamilyDescriptor hcd : list) { + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, htd, hcd, regionFS, false); String family = hcd.getNameAsString(); Path storePath = new Path(regionPath, family); boolean succeed = false; @@ -102,26 +109,19 @@ public static void cleanupObsoleteMobFiles(Configuration conf, TableName table, + " execution, aborting MOB file cleaner chore.", storePath); throw new IOException(errMsg); } - RemoteIterator rit = fs.listLocatedStatus(storePath); - List storeFiles = new ArrayList(); - // Load list of store files first - while (rit.hasNext()) { - Path p = rit.next().getPath(); - if (fs.isFile(p)) { - storeFiles.add(p); - } - } - LOG.info("Found {} store files in: {}", storeFiles.size(), storePath); + List storeFileInfos = sft.load(); + LOG.info("Found {} store files in: {}", storeFileInfos.size(), storePath); Path currentPath = null; try { - for (Path pp : storeFiles) { + for (StoreFileInfo storeFileInfo : storeFileInfos) { + Path pp = storeFileInfo.getPath(); currentPath = pp; LOG.trace("Store file: {}", pp); HStoreFile sf = null; byte[] mobRefData = null; byte[] bulkloadMarkerData = null; try { - sf = new HStoreFile(fs, pp, conf, CacheConfig.DISABLED, BloomType.NONE, true); + sf = new HStoreFile(storeFileInfo, BloomType.NONE, CacheConfig.DISABLED); sf.initReader(); mobRefData = sf.getMetadataValue(HStoreFile.MOB_FILE_REFS); bulkloadMarkerData = sf.getMetadataValue(HStoreFile.BULKLOAD_TASK_KEY); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index b6b8be9d1791..c8e6fac9ceda 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -59,9 +59,12 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreUtils; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -266,7 +269,7 @@ public static void setCacheMobBlocks(Scan scan, boolean cacheBlocks) { * @param cacheConfig The cacheConfig that disables the block cache. * @param current The current time. 
*/ - public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName, + public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableDescriptor htd, ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current) throws IOException { long timeToLive = columnDescriptor.getTimeToLive(); @@ -287,7 +290,11 @@ public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, Table LOG.info("MOB HFiles older than " + expireDate.toGMTString() + " will be deleted!"); FileStatus[] stats = null; + TableName tableName = htd.getTableName(); Path mobTableDir = CommonFSUtils.getTableDir(getMobHome(conf), tableName); + HRegionFileSystem regionFS = + HRegionFileSystem.create(conf, fs, mobTableDir, getMobRegionInfo(tableName)); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, htd, columnDescriptor, regionFS); Path path = getMobFamilyPath(conf, tableName, columnDescriptor.getNameAsString()); try { stats = fs.listStatus(path); @@ -318,7 +325,7 @@ public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, Table LOG.debug("{} is an expired file", fileName); } filesToClean - .add(new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true)); + .add(new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true, sft)); if ( filesToClean.size() >= conf.getInt(MOB_CLEANER_BATCH_SIZE_UPPER_BOUND, DEFAULT_MOB_CLEANER_BATCH_SIZE_UPPER_BOUND) @@ -387,6 +394,10 @@ public static Path getMobTableDir(Path rootDir, TableName tableName) { return CommonFSUtils.getTableDir(getMobHome(rootDir), tableName); } + public static Path getMobTableDir(Configuration conf, TableName tableName) { + return getMobTableDir(new Path(conf.get(HConstants.HBASE_DIR)), tableName); + } + /** * Gets the region dir of the mob files. It's * {HBASE_DIR}/mobdir/data/{namespace}/{tableName}/{regionEncodedName}. 
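For orientation, here is a minimal caller-side sketch of the pattern the MOB changes above introduce (the wrapper class and method names are illustrative, not part of this patch; only APIs that appear in this diff are used): resolve the MOB region's HRegionFileSystem and StoreFileTracker first, then hand the tracker to the HStoreFile constructor instead of building a StoreFileInfo directly.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.mob.MobUtils;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.regionserver.HStoreFile;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

    public class MobSftUsageSketch {
      // Resolve the MOB region's file system and tracker, then open the hfile through the tracker.
      static HStoreFile openMobFile(Configuration conf, FileSystem fs, TableDescriptor htd,
          ColumnFamilyDescriptor family, Path mobFilePath) throws IOException {
        TableName table = htd.getTableName();
        Path mobTableDir = MobUtils.getMobTableDir(conf, table);
        HRegionFileSystem regionFs =
            HRegionFileSystem.create(conf, fs, mobTableDir, MobUtils.getMobRegionInfo(table));
        StoreFileTracker sft = StoreFileTrackerFactory.create(conf, htd, family, regionFs);
        // HStoreFile now takes the tracker, so reference/link resolution goes through the SFT.
        return new HStoreFile(fs, mobFilePath, conf, CacheConfig.DISABLED, BloomType.NONE, true, sft);
      }
    }
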
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java index 8443827ccaa1..2a5e2a5aa39d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java @@ -254,7 +254,7 @@ private HStore getHStore(Path hFilePath) throws DataTieringException { private HStoreFile getHStoreFile(Path hFilePath) throws DataTieringException { HStore hStore = getHStore(hFilePath); for (HStoreFile file : hStore.getStorefiles()) { - if (file.getPath().equals(hFilePath)) { + if (file.getPath().toUri().getPath().toString().equals(hFilePath.toString())) { return file; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index d4b24de33cc3..468f478dbc48 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.mob.MobFileName; import org.apache.hadoop.hbase.mob.MobStoreEngine; import org.apache.hadoop.hbase.mob.MobUtils; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.IdLock; import org.apache.yetus.audience.InterfaceAudience; @@ -280,8 +282,9 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio private void validateMobFile(Path path) throws IOException { HStoreFile storeFile = null; try { + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, getStoreContext()); storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(), BloomType.NONE, - isPrimaryReplicaStore()); + isPrimaryReplicaStore(), sft); storeFile.initReader(); } catch (IOException e) { LOG.error("Fail to open mob file[" + path + "], keep it in temp directory.", e); @@ -405,7 +408,7 @@ private MobCell readCell(List locations, String fileName, Cell search, MobFile file = null; Path path = new Path(location, fileName); try { - file = mobFileCache.openFile(fs, path, getCacheConfig()); + file = mobFileCache.openFile(fs, path, getCacheConfig(), this.getStoreContext()); return readPt != -1 ? 
file.readCell(search, cacheMobBlocks, readPt) : file.readCell(search, cacheMobBlocks); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index e550e8ba8df5..f75e8f5ac5e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -156,6 +156,8 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.metrics.MetricsTableRequests; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector; @@ -1314,7 +1316,9 @@ public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration if (StoreFileInfo.isReference(p) || HFileLink.isHFileLink(p)) { // Only construct StoreFileInfo object if its not a hfile, save obj // creation - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, status); + StoreFileTracker sft = + StoreFileTrackerFactory.create(conf, tableDescriptor, family, regionFs); + StoreFileInfo storeFileInfo = sft.getStoreFileInfo(status, status.getPath(), false); hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs)); } else if (StoreFileInfo.isHFile(p)) { // If its a HFile, then lets just add to the block distribution @@ -5307,9 +5311,12 @@ long replayRecoveredEditsIfAny(Map maxSeqIdInStores, // column family. Have to fake out file type too by casting our recovered.edits as // storefiles String fakeFamilyName = WALSplitUtil.getRegionDirRecoveredEditsDir(regionWALDir).getName(); + StoreContext storeContext = + StoreContext.getBuilder().withRegionFileSystem(getRegionFileSystem()).build(); + StoreFileTracker sft = StoreFileTrackerFactory.create(this.conf, true, storeContext); Set fakeStoreFiles = new HashSet<>(files.size()); for (Path file : files) { - fakeStoreFiles.add(new HStoreFile(walFS, file, this.conf, null, null, true)); + fakeStoreFiles.add(new HStoreFile(walFS, file, this.conf, null, null, true, sft)); } getRegionWALFileSystem().archiveRecoveredEdits(fakeFamilyName, fakeStoreFiles); } else { @@ -6295,17 +6302,15 @@ void replayWALBulkLoadEventMarker(WALProtos.BulkLoadDescriptor bulkLoadEvent) th continue; } - List storeFiles = storeDescriptor.getStoreFileList(); - for (String storeFile : storeFiles) { - StoreFileInfo storeFileInfo = null; + StoreContext storeContext = store.getStoreContext(); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, storeContext); + + List storeFiles = sft.load(); + for (StoreFileInfo storeFileInfo : storeFiles) { try { - storeFileInfo = fs.getStoreFileInfo(Bytes.toString(family), storeFile); store.bulkLoadHFile(storeFileInfo); } catch (FileNotFoundException ex) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + ((storeFileInfo != null) - ? storeFileInfo.toString() - : (new Path(Bytes.toString(family), storeFile)).toString()) + LOG.warn(getRegionInfo().getEncodedName() + " : " + storeFileInfo.toString() + " doesn't exist any more. 
Skip loading the file"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index c77f4d4aefde..b80599fd61a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -255,51 +255,6 @@ public String getStoragePolicyName(String familyName) { return null; } - /** - * Returns the store files available for the family. This methods performs the filtering based on - * the valid store files. - * @param familyName Column Family Name - * @return a set of {@link StoreFileInfo} for the specified family. - */ - public List getStoreFiles(final String familyName) throws IOException { - return getStoreFiles(familyName, true); - } - - /** - * Returns the store files available for the family. This methods performs the filtering based on - * the valid store files. - * @param familyName Column Family Name - * @return a set of {@link StoreFileInfo} for the specified family. - */ - public List getStoreFiles(final String familyName, final boolean validate) - throws IOException { - Path familyDir = getStoreDir(familyName); - FileStatus[] files = CommonFSUtils.listStatus(this.fs, familyDir); - if (files == null) { - if (LOG.isTraceEnabled()) { - LOG.trace("No StoreFiles for: " + familyDir); - } - return null; - } - - ArrayList storeFiles = new ArrayList<>(files.length); - for (FileStatus status : files) { - if (validate && !StoreFileInfo.isValid(status)) { - // recovered.hfiles directory is expected inside CF path when hbase.wal.split.to.hfile to - // true, refer HBASE-23740 - if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) { - LOG.warn("Invalid StoreFile: {}", status.getPath()); - } - continue; - } - StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, - regionInfoForFs, familyName, status.getPath()); - storeFiles.add(info); - - } - return storeFiles; - } - /** * Returns the store files' LocatedFileStatus which available for the family. This methods * performs the filtering based on the valid store files. 
@@ -350,11 +305,11 @@ Path getStoreFilePath(final String familyName, final String fileName) { * @param fileName File Name * @return The {@link StoreFileInfo} for the specified family/file */ - StoreFileInfo getStoreFileInfo(final String familyName, final String fileName) - throws IOException { + StoreFileInfo getStoreFileInfo(final String familyName, final String fileName, + final StoreFileTracker tracker) throws IOException { Path familyDir = getStoreDir(familyName); return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs, - familyName, new Path(familyDir, fileName)); + familyName, new Path(familyDir, fileName), tracker); } /** @@ -379,20 +334,6 @@ public boolean hasReferences(final String familyName) throws IOException { return false; } - /** - * Check whether region has Reference file - * @param htd table desciptor of the region - * @return true if region has reference file - */ - public boolean hasReferences(final TableDescriptor htd) throws IOException { - for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) { - if (hasReferences(family.getNameAsString())) { - return true; - } - } - return false; - } - /** Returns the set of families present on disk n */ public Collection getFamilies() throws IOException { FileStatus[] fds = @@ -628,7 +569,7 @@ private void insertRegionFilesIntoStoreTracker(List allFiles, MasterProced tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs)); fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>()); List infos = fileInfoMap.get(familyName); - infos.add(new StoreFileInfo(conf, fs, file, true)); + infos.add(trackerMap.get(familyName).getStoreFileInfo(file, true)); } for (Map.Entry entry : trackerMap.entrySet()) { entry.getValue().add(fileInfoMap.get(entry.getKey())); @@ -672,7 +613,7 @@ public void createSplitsDir(RegionInfo daughterA, RegionInfo daughterB) throws I * @return Path to created reference. */ public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte[] splitRow, - boolean top, RegionSplitPolicy splitPolicy) throws IOException { + boolean top, RegionSplitPolicy splitPolicy, StoreFileTracker tracker) throws IOException { Path splitDir = new Path(getSplitsDir(hri), familyName); // Add the referred-to regions name as a dot separated suffix. // See REF_NAME_REGEX regex above. The referred-to regions name is @@ -758,7 +699,8 @@ public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte // A reference to the bottom half of the hsf store file. Reference r = top ? Reference.createTopReference(splitRow) : Reference.createBottomReference(splitRow); - return r.write(fs, p); + tracker.createReference(r, p); + return p; } // =========================================================================== @@ -799,8 +741,8 @@ static boolean mkdirs(FileSystem fs, Configuration conf, Path dir) throws IOExce * @return Path to created reference. * @throws IOException if the merge write fails. */ - public Path mergeStoreFile(RegionInfo mergingRegion, String familyName, HStoreFile f) - throws IOException { + public Path mergeStoreFile(RegionInfo mergingRegion, String familyName, HStoreFile f, + StoreFileTracker tracker) throws IOException { Path referenceDir = new Path(getMergesDir(regionInfoForFs), familyName); // A whole reference to the store file. 
Reference r = Reference.createTopReference(mergingRegion.getStartKey()); @@ -812,7 +754,8 @@ public Path mergeStoreFile(RegionInfo mergingRegion, String familyName, HStoreFi // Write reference with same file id only with the other region name as // suffix and into the new region location (under same family). Path p = new Path(referenceDir, f.getPath().getName() + "." + mergingRegionName); - return r.write(fs, p); + tracker.createReference(r, p); + return p; } /** @@ -1222,4 +1165,9 @@ private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSl } Thread.sleep((long) baseSleepBeforeRetries * sleepMultiplier); } + + public static HRegionFileSystem create(final Configuration conf, final FileSystem fs, + final Path tableDir, final RegionInfo regionInfo) throws IOException { + return new HRegionFileSystem(conf, fs, tableDir, regionInfo); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 1f37d08f7299..d63633e5311d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -88,6 +88,8 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.security.EncryptionUtil; @@ -431,7 +433,7 @@ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { return ttl; } - StoreContext getStoreContext() { + public StoreContext getStoreContext() { return storeContext; } @@ -1399,8 +1401,9 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick compactionOutputs.remove(sf.getPath().getName()); } for (String compactionOutput : compactionOutputs) { + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, storeContext); StoreFileInfo storeFileInfo = - getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput); + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput, sft); HStoreFile storeFile = storeEngine.createStoreFileAndReader(storeFileInfo); outputStoreFiles.add(storeFile); } @@ -2043,8 +2046,9 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, storeContext); StoreFileInfo storeFileInfo = - getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file, sft); HStoreFile storeFile = storeEngine.createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java index 14627ebc9389..d52abdba1fc3 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -226,8 +227,8 @@ public long getMaxMemStoreTS() { * @param primaryReplica true if this is a store file for primary replica, otherwise false. */ public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf, - BloomType cfBloomType, boolean primaryReplica) throws IOException { - this(new StoreFileInfo(conf, fs, p, primaryReplica), cfBloomType, cacheConf); + BloomType cfBloomType, boolean primaryReplica, StoreFileTracker sft) throws IOException { + this(sft.getStoreFileInfo(p, primaryReplica), cfBloomType, cacheConf); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java index a696032d59f4..20e8328f0efa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java @@ -220,8 +220,7 @@ public StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws I } public HStoreFile createStoreFileAndReader(Path p) throws IOException { - StoreFileInfo info = new StoreFileInfo(conf, ctx.getRegionFileSystem().getFileSystem(), p, - ctx.isPrimaryReplicaStore()); + StoreFileInfo info = storeFileTracker.getStoreFileInfo(p, ctx.isPrimaryReplicaStore()); return createStoreFileAndReader(info); } @@ -373,8 +372,8 @@ public void refreshStoreFiles() throws IOException { public void refreshStoreFiles(Collection newFiles) throws IOException { List storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { - storeFiles - .add(ctx.getRegionFileSystem().getStoreFileInfo(ctx.getFamily().getNameAsString(), file)); + storeFiles.add(ctx.getRegionFileSystem().getStoreFileInfo(ctx.getFamily().getNameAsString(), + file, storeFileTracker)); } refreshStoreFilesInternal(storeFiles); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 290f8b799ca9..6d866613dd16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -35,10 +35,12 @@ import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileInfo; +import org.apache.hadoop.hbase.io.hfile.InvalidHFileException; import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.mob.MobUtils; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; @@ -111,20 
+113,9 @@ public class StoreFileInfo implements Configurable { // done. private final AtomicInteger refCount = new AtomicInteger(0); - /** - * Create a Store File Info - * @param conf the {@link Configuration} to use - * @param fs The current file system to use. - * @param initialPath The {@link Path} of the file - * @param primaryReplica true if this is a store file for primary replica, otherwise false. - */ - public StoreFileInfo(final Configuration conf, final FileSystem fs, final Path initialPath, - final boolean primaryReplica) throws IOException { - this(conf, fs, null, initialPath, primaryReplica); - } - private StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Path initialPath, final boolean primaryReplica) throws IOException { + final Path initialPath, final boolean primaryReplica, final StoreFileTracker sft) + throws IOException { assert fs != null; assert initialPath != null; assert conf != null; @@ -142,7 +133,7 @@ private StoreFileInfo(final Configuration conf, final FileSystem fs, final FileS this.link = HFileLink.buildFromHFileLinkPattern(conf, p); LOG.trace("{} is a link", p); } else if (isReference(p)) { - this.reference = Reference.read(fs, p); + this.reference = sft.readReference(p); Path referencePath = getReferredToFile(p); if (HFileLink.isHFileLink(referencePath)) { // HFileLink Reference @@ -169,17 +160,6 @@ private StoreFileInfo(final Configuration conf, final FileSystem fs, final FileS } } - /** - * Create a Store File Info - * @param conf the {@link Configuration} to use - * @param fs The current file system to use. - * @param fileStatus The {@link FileStatus} of the file - */ - public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus) - throws IOException { - this(conf, fs, fileStatus, fileStatus.getPath(), true); - } - /** * Create a Store File Info from an HFileLink * @param conf The {@link Configuration} to use @@ -224,6 +204,29 @@ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileSt this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD); } + /** + * Create a Store File Info from an HFileLink and a Reference + * @param conf The {@link Configuration} to use + * @param fs The current file system to use + * @param initialPath The {@link Path} of the file + * @param reference The reference instance + * @param link The link instance + */ + public StoreFileInfo(final Configuration conf, final FileSystem fs, final long createdTimestamp, + final Path initialPath, final long size, final Reference reference, final HFileLink link, + final boolean primaryReplica) { + this.fs = fs; + this.conf = conf; + this.primaryReplica = primaryReplica; + this.initialPath = initialPath; + this.createdTimestamp = createdTimestamp; + this.size = size; + this.reference = reference; + this.link = link; + this.noReadahead = + this.conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD); + } + @Override public Configuration getConf() { return conf; @@ -769,4 +772,14 @@ int decreaseRefCount() { return this.refCount.decrementAndGet(); } + public static StoreFileInfo createStoreFileInfoForHFile(final Configuration conf, + final FileSystem fs, final Path initialPath, final boolean primaryReplica) throws IOException { + if (HFileLink.isHFileLink(initialPath) || isReference(initialPath)) { + throw new InvalidHFileException("Path " + initialPath + " is an HFile link or a Reference"); + } + StoreFileInfo
storeFileInfo = + new StoreFileInfo(conf, fs, null, initialPath, primaryReplica, null); + return storeFileInfo; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java index 128537f10afe..035e6c76f85a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java @@ -18,13 +18,21 @@ package org.apache.hadoop.hbase.regionserver.storefiletracker; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The default implementation for store file tracker, where we do not persist the store file list, @@ -37,6 +45,8 @@ public DefaultStoreFileTracker(Configuration conf, boolean isPrimaryReplica, Sto super(conf, isPrimaryReplica, ctx); } + private static final Logger LOG = LoggerFactory.getLogger(DefaultStoreFileTracker.class); + @Override public boolean requireWritingToTmpDirFirst() { return true; @@ -55,12 +65,48 @@ protected void doAddCompactionResults(Collection compactedFiles, @Override protected List doLoadStoreFiles(boolean readOnly) throws IOException { - List files = - ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString()); + List files = getStoreFiles(ctx.getFamily().getNameAsString()); return files != null ? files : Collections.emptyList(); } @Override protected void doSetStoreFiles(Collection files) throws IOException { } + + /** + * Returns the store files available for the family. This methods performs the filtering based on + * the valid store files. + * @param familyName Column Family Name + * @return a set of {@link StoreFileInfo} for the specified family. 
+ */ + public List getStoreFiles(final String familyName) throws IOException { + Path familyDir = ctx.getRegionFileSystem().getStoreDir(familyName); + FileStatus[] files = + CommonFSUtils.listStatus(ctx.getRegionFileSystem().getFileSystem(), familyDir); + if (files == null) { + if (LOG.isTraceEnabled()) { + LOG.trace("No StoreFiles for: " + familyDir); + } + return null; + } + + ArrayList storeFiles = new ArrayList<>(files.length); + for (FileStatus status : files) { + if (!StoreFileInfo.isValid(status)) { + // recovered.hfiles directory is expected inside CF path when + // hbase.wal.split.to.hfile to + // true, refer HBASE-23740 + if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) { + LOG.warn("Invalid StoreFile: {}", status.getPath()); + } + continue; + } + StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, + ctx.getRegionFileSystem().getFileSystem(), ctx.getRegionInfo(), + ctx.getRegionFileSystem().getRegionInfoForFS(), familyName, status.getPath(), this); + storeFiles.add(info); + + } + return storeFiles; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java index d3dfe21521d7..b000d837d59b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java @@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileEntry; import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileList; @@ -53,6 +55,7 @@ class FileBasedStoreFileTracker extends StoreFileTrackerBase { private final StoreFileListFile backedFile; private final Map storefiles = new HashMap<>(); + private static final Logger LOG = LoggerFactory.getLogger(FileBasedStoreFileTracker.class); public FileBasedStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) { super(conf, isPrimaryReplica, ctx); @@ -69,6 +72,10 @@ public FileBasedStoreFileTracker(Configuration conf, boolean isPrimaryReplica, S @Override protected List doLoadStoreFiles(boolean readOnly) throws IOException { StoreFileList list = backedFile.load(readOnly); + if (LOG.isTraceEnabled()) { + LOG.trace("Loaded file list backed file, containing " + list.getStoreFileList().size() + + " store file entries"); + } if (list == null) { return Collections.emptyList(); } @@ -77,7 +84,7 @@ protected List doLoadStoreFiles(boolean readOnly) throws IOExcept for (StoreFileEntry entry : list.getStoreFileList()) { infos.add(ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, ctx.getRegionInfo(), ctx.getRegionFileSystem().getRegionInfoForFS(), ctx.getFamily().getNameAsString(), - new Path(ctx.getFamilyStoreDirectoryPath(), entry.getName()))); + new Path(ctx.getFamilyStoreDirectoryPath(), entry.getName()), this)); } // In general, for primary replica, the load method should only be called once when // initialization, so we do not need synchronized here. 
And for secondary replicas, though the @@ -115,6 +122,9 @@ protected void doAddNewStoreFiles(Collection newFiles) throws IOE builder.addStoreFile(toStoreFileEntry(info)); } backedFile.update(builder); + if (LOG.isTraceEnabled()) { + LOG.trace(newFiles.size() + " store files added to store file list file: " + newFiles); + } for (StoreFileInfo info : newFiles) { storefiles.put(info.getPath().getName(), info); } @@ -138,6 +148,10 @@ protected void doAddCompactionResults(Collection compactedFiles, builder.addStoreFile(toStoreFileEntry(info)); } backedFile.update(builder); + if (LOG.isTraceEnabled()) { + LOG.trace( + "replace compacted files: " + compactedFileNames + " with new store files: " + newFiles); + } for (String name : compactedFileNames) { storefiles.remove(name); } @@ -157,6 +171,9 @@ protected void doSetStoreFiles(Collection files) throws IOExcepti builder.addStoreFile(toStoreFileEntry(info)); } backedFile.update(builder); + if (LOG.isTraceEnabled()) { + LOG.trace("Set store files in store file list file: " + files); + } } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java index b0024b73786a..12343b50dd37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java @@ -20,7 +20,10 @@ import java.io.IOException; import java.util.Collection; import java.util.List; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.regionserver.CreateStoreFileWriterParams; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; @@ -94,4 +97,26 @@ void replace(Collection compactedFiles, Collection * does not allow broken store files under the actual data directory. */ boolean requireWritingToTmpDirFirst(); + + Reference createReference(Reference reference, Path path) throws IOException; + + /** + * Reads the reference file from the given path. + * @param path the {@link Path} to the reference file in the file system. 
+ * @return a {@link Reference} that points at top/bottom half of an hfile + */ + Reference readReference(Path path) throws IOException; + + /** + * Returns true if the store's column family has reference files + * @return true if the family contains reference files + */ + boolean hasReferences() throws IOException; + + /** + * Returns the {@link StoreFileInfo} for the store file at the given path, reusing the given + * {@link FileStatus} for size and timestamp information. + */ + StoreFileInfo getStoreFileInfo(final FileStatus fileStatus, final Path initialPath, + final boolean primaryReplica) throws IOException; + + /** + * Returns the {@link StoreFileInfo} for the store file at the given path. + */ + StoreFileInfo getStoreFileInfo(final Path initialPath, final boolean primaryReplica) + throws IOException; + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java index 794a707062e5..5d0b5b4ae08d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java @@ -19,13 +19,22 @@ import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; +import java.io.BufferedInputStream; +import java.io.DataInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.Collection; import java.util.List; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -37,11 +46,14 @@ import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreUtils; +import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; + /** * Base class for all store file tracker. *

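For reviewers, a minimal sketch of the call pattern the tracker API introduced by this patch supports; familyDir, regionFs, refPath and splitRow are illustrative placeholders rather than identifiers from the patch, and the calls mirror the ones used in the hunks below and in the test changes:

    StoreContext storeCtx = StoreContext.getBuilder()
      .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("cf"))
      .withFamilyStoreDirectoryPath(familyDir)
      .withRegionFileSystem(regionFs)
      .build();
    StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, storeCtx);
    // Reference files are created and read through the tracker instead of Reference.write/read.
    Reference topRef = Reference.createTopReference(splitRow);
    tracker.createReference(topRef, refPath);
    Reference readBack = tracker.readReference(refPath);
    StoreFileInfo info = tracker.getStoreFileInfo(refPath, true);

Concrete trackers such as DefaultStoreFileTracker and FileBasedStoreFileTracker inherit these reference-handling methods from the base class changed in the next hunk.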
@@ -191,6 +203,122 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th return builder.build(); } + @Override + public Reference createReference(Reference reference, Path path) throws IOException { + FSDataOutputStream out = ctx.getRegionFileSystem().getFileSystem().create(path, false); + try { + out.write(reference.toByteArray()); + } finally { + out.close(); + } + return reference; + } + + /** + * Returns true if this store's column family directory contains reference files + * @return true if the family contains reference files + */ + @Override + public boolean hasReferences() throws IOException { + Path storeDir = ctx.getRegionFileSystem().getStoreDir(ctx.getFamily().getNameAsString()); + FileStatus[] files = + CommonFSUtils.listStatus(ctx.getRegionFileSystem().getFileSystem(), storeDir); + if (files != null) { + for (FileStatus stat : files) { + if (stat.isDirectory()) { + continue; + } + if (StoreFileInfo.isReference(stat.getPath())) { + LOG.trace("Reference {}", stat.getPath()); + return true; + } + } + } + return false; + } + + @Override + public Reference readReference(final Path p) throws IOException { + InputStream in = ctx.getRegionFileSystem().getFileSystem().open(p); + try { + // I need to be able to move back in the stream if this is not a pb serialization so I can + // do the Writable decoding instead. + in = in.markSupported() ? in : new BufferedInputStream(in); + int pblen = ProtobufUtil.lengthOfPBMagic(); + in.mark(pblen); + byte[] pbuf = new byte[pblen]; + IOUtils.readFully(in, pbuf, 0, pblen); + // WATCHOUT! Return in middle of function!!! + if (ProtobufUtil.isPBMagicPrefix(pbuf)) { + return Reference.convert( + org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos.Reference.parseFrom(in)); + } + // Else presume Writables. Need to reset the stream since it didn't start w/ pb. + // We won't bother rewriting the Reference as a pb since Reference is transitory. + in.reset(); + Reference r = new Reference(); + DataInputStream dis = new DataInputStream(in); + // Set in = dis so it gets the close below in the finally on our way out.
+ in = dis; + r.readFields(dis); + return r; + } finally { + in.close(); + } + } + + @Override + public StoreFileInfo getStoreFileInfo(Path initialPath, boolean primaryReplica) + throws IOException { + return getStoreFileInfo(null, initialPath, primaryReplica); + } + + @Override + public StoreFileInfo getStoreFileInfo(FileStatus fileStatus, Path initialPath, + boolean primaryReplica) throws IOException { + FileSystem fs = this.ctx.getRegionFileSystem().getFileSystem(); + assert fs != null; + assert initialPath != null; + assert conf != null; + Reference reference = null; + HFileLink link = null; + long createdTimestamp = 0; + long size = 0; + Path p = initialPath; + if (HFileLink.isHFileLink(p)) { + // HFileLink + reference = null; + link = HFileLink.buildFromHFileLinkPattern(conf, p); + LOG.trace("{} is a link", p); + } else if (StoreFileInfo.isReference(p)) { + reference = readReference(p); + Path referencePath = StoreFileInfo.getReferredToFile(p); + if (HFileLink.isHFileLink(referencePath)) { + // HFileLink Reference + link = HFileLink.buildFromHFileLinkPattern(conf, referencePath); + } else { + // Reference + link = null; + } + LOG.trace("{} is a {} reference to {}", p, reference.getFileRegion(), referencePath); + } else + if (StoreFileInfo.isHFile(p) || StoreFileInfo.isMobFile(p) || StoreFileInfo.isMobRefFile(p)) { + // HFile + if (fileStatus != null) { + createdTimestamp = fileStatus.getModificationTime(); + size = fileStatus.getLen(); + } else { + FileStatus fStatus = fs.getFileStatus(initialPath); + createdTimestamp = fStatus.getModificationTime(); + size = fStatus.getLen(); + } + } else { + throw new IOException("path=" + p + " doesn't look like a valid StoreFile"); + } + return new StoreFileInfo(conf, fs, createdTimestamp, initialPath, size, reference, link, + isPrimaryReplica); + } + /** * For primary replica, we will call load once when opening a region, and the implementation could * choose to do some cleanup work. 
So here we use {@code readOnly} to indicate that whether you diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java index 0f487afd1cba..828f1974fca7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java @@ -129,10 +129,16 @@ public static StoreFileTracker create(Configuration conf, boolean isPrimaryRepli */ public static StoreFileTracker create(Configuration conf, TableDescriptor td, ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) { + return create(conf, td, cfd, regionFs, true); + } + + public static StoreFileTracker create(Configuration conf, TableDescriptor td, + ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs, boolean isPrimaryReplica) { StoreContext ctx = StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs) .withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build(); - return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), true, ctx); + return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), isPrimaryReplica, + ctx); } private static Configuration mergeConfigurations(Configuration global, TableDescriptor table, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index dab581041ee5..958e2d6faabb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil; import org.apache.hadoop.hbase.security.access.TablePermission; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.RestoreMetaChanges; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -500,6 +501,9 @@ private void restoreRegion(final RegionInfo regionInfo, for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { byte[] family = Bytes.toBytes(familyDir.getName()); + StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, + StoreContext.getBuilder().withColumnFamilyDescriptor(tableDesc.getColumnFamily(family)) + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); Set familyFiles = getTableRegionFamilyFiles(familyDir); List snapshotFamilyFiles = snapshotFiles.remove(familyDir.getName()); @@ -512,7 +516,7 @@ private void restoreRegion(final RegionInfo regionInfo, familyFiles.remove(storeFile.getName()); // no need to restore already present files, but we need to add those to tracker filesToTrack - .add(new StoreFileInfo(conf, fs, new Path(familyDir, storeFile.getName()), true)); + .add(tracker.getStoreFileInfo(new Path(familyDir, storeFile.getName()), true)); } else { // HFile missing hfilesToAdd.add(storeFile); @@ -533,9 +537,10 @@ private void restoreRegion(final RegionInfo regionInfo, for (SnapshotRegionManifest.StoreFile storeFile : hfilesToAdd) { LOG.debug("Restoring missing HFileLink " + 
storeFile.getName() + " of snapshot=" + snapshotName + " to region=" + regionInfo.getEncodedName() + " table=" + tableName); - String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); + String fileName = + restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs, tracker); // mark the reference file to be added to tracker - filesToTrack.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); + filesToTrack.add(tracker.getStoreFileInfo(new Path(familyDir, fileName), true)); } } else { // Family doesn't exists in the snapshot @@ -545,10 +550,6 @@ private void restoreRegion(final RegionInfo regionInfo, fs.delete(familyDir, true); } - StoreFileTracker tracker = - StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder() - .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); - // simply reset list of tracked files with the matching files // and the extra one present in the snapshot tracker.set(filesToTrack); @@ -569,14 +570,14 @@ private void restoreRegion(final RegionInfo regionInfo, for (SnapshotRegionManifest.StoreFile storeFile : familyEntry.getValue()) { LOG.trace("Adding HFileLink (Not present in the table) " + storeFile.getName() + " of snapshot " + snapshotName + " to table=" + tableName); - String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); - files.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); + String fileName = + restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs, tracker); + files.add(tracker.getStoreFileInfo(new Path(familyDir, fileName), true)); } tracker.set(files); } } - /** Returns The set of files in the specified family directory. */ private Set getTableRegionFamilyFiles(final Path familyDir) throws IOException { FileStatus[] hfiles = CommonFSUtils.listStatus(fs, familyDir); if (hfiles == null) { @@ -659,6 +660,16 @@ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8()); List clonedFiles = new ArrayList<>(); + Path regionPath = new Path(tableDir, newRegionInfo.getEncodedName()); + HRegionFileSystem regionFS = (fs.exists(regionPath)) + ? 
HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) + : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); + + Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc, + tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray())); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(sftConf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) { LOG.info("Adding HFileLink " + storeFile.getName() + " from cloned region " + "in snapshot " + snapshotName + " to table=" + tableName); @@ -669,24 +680,15 @@ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, if (fs.exists(mobPath)) { fs.delete(mobPath, true); } - restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs); + restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs, tracker); } else { - String file = restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs); - clonedFiles.add(new StoreFileInfo(conf, fs, new Path(familyDir, file), true)); + String file = + restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs, tracker); + clonedFiles.add(tracker.getStoreFileInfo(new Path(familyDir, file), true)); } } // we don't need to track files under mobdir if (!MobUtils.isMobRegionInfo(newRegionInfo)) { - Path regionPath = new Path(tableDir, newRegionInfo.getEncodedName()); - HRegionFileSystem regionFS = (fs.exists(regionPath)) - ? HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) - : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); - - Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc, - tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray())); - StoreFileTracker tracker = - StoreFileTrackerFactory.create(sftConf, true, StoreContext.getBuilder() - .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); tracker.set(clonedFiles); } } @@ -720,13 +722,13 @@ private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionIn * @param storeFile store file name (can be a Reference, HFileLink or simple HFile) */ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInfo, - final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) - throws IOException { + final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef, + final StoreFileTracker tracker) throws IOException { String hfileName = storeFile.getName(); if (HFileLink.isHFileLink(hfileName)) { return HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName, createBackRef); } else if (StoreFileInfo.isReference(hfileName)) { - return restoreReferenceFile(familyDir, regionInfo, storeFile); + return restoreReferenceFile(familyDir, regionInfo, storeFile, tracker); } else { return HFileLink.create(conf, fs, familyDir, regionInfo, hfileName, createBackRef); } @@ -756,7 +758,8 @@ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInf * @param storeFile reference file name */ private String restoreReferenceFile(final Path familyDir, final RegionInfo regionInfo, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile, final StoreFileTracker tracker) + throws IOException { 
String hfileName = storeFile.getName(); // Extract the referred information (hfile name and parent region) @@ -790,7 +793,7 @@ private String restoreReferenceFile(final Path familyDir, final RegionInfo regio // Create the new reference if (storeFile.hasReference()) { Reference reference = Reference.convert(storeFile.getReference()); - reference.write(fs, outPath); + tracker.createReference(reference, outPath); } else { InputStream in; if (linkPath != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 7dbf5b0655e0..a53d15d2f8e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; @@ -207,7 +208,7 @@ protected void addMobRegion(RegionInfo regionInfo, RegionVisitor visitor) throws monitor.rethrowException(); Path storePath = MobUtils.getMobFamilyPath(mobRegionPath, hcd.getNameAsString()); - List storeFiles = getStoreFiles(storePath); + List storeFiles = getStoreFiles(storePath, htd, hcd, regionInfo); if (storeFiles == null) { if (LOG.isDebugEnabled()) { LOG.debug("No mob files under family: " + hcd.getNameAsString()); @@ -341,13 +342,18 @@ protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor vis } } - private List getStoreFiles(Path storeDir) throws IOException { - FileStatus[] stats = CommonFSUtils.listStatus(rootFs, storeDir); + private List getStoreFiles(Path storePath, TableDescriptor htd, + ColumnFamilyDescriptor hcd, RegionInfo regionInfo) throws IOException { + FileStatus[] stats = CommonFSUtils.listStatus(rootFs, storePath); if (stats == null) return null; + HRegionFileSystem regionFS = HRegionFileSystem.create(conf, rootFs, + MobUtils.getMobTableDir(new Path(conf.get(HConstants.HBASE_DIR)), htd.getTableName()), + regionInfo); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, htd, hcd, regionFS, false); ArrayList storeFiles = new ArrayList<>(stats.length); for (int i = 0; i < stats.length; ++i) { - storeFiles.add(new StoreFileInfo(conf, rootFs, stats[i])); + storeFiles.add(sft.getStoreFileInfo(stats[i], stats[i].getPath(), false)); } return storeFiles; } @@ -385,7 +391,7 @@ private void load() throws IOException { ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { this.regionManifests = - SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); + SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc, htd); } finally { tpool.shutdown(); } @@ -403,7 +409,7 @@ private void load() throws IOException { ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { v1Regions = - SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); + SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc, htd); v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, workingDir, desc, manifestSizeLimit); } catch (InvalidProtocolBufferException e) { @@ -502,7 +508,7 @@ private void convertToV2SingleManifest() throws 
IOException { setStatusMsg("Loading Region manifests for " + this.desc.getName()); try { v1Regions = - SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, workingDir, desc); + SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, workingDir, desc, htd); v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, workingDir, desc, manifestSizeLimit); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java index 61c366de971a..26aeadc41928 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -30,9 +30,13 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSUtils; @@ -119,7 +123,7 @@ public void storeFile(final HRegionFileSystem region, final Path familyDir, static List loadRegionManifests(final Configuration conf, final Executor executor, final FileSystem fs, final Path snapshotDir, - final SnapshotDescription desc) throws IOException { + final SnapshotDescription desc, final TableDescriptor htd) throws IOException { FileStatus[] regions = CommonFSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { @@ -134,7 +138,7 @@ static List loadRegionManifests(final Configuration conf @Override public SnapshotRegionManifest call() throws IOException { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath()); - return buildManifestFromDisk(conf, fs, snapshotDir, hri); + return buildManifestFromDisk(conf, fs, snapshotDir, hri, htd); } }); } @@ -159,7 +163,8 @@ static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, } static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, final FileSystem fs, - final Path tableDir, final RegionInfo regionInfo) throws IOException { + final Path tableDir, final RegionInfo regionInfo, final TableDescriptor htd) + throws IOException { HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, true); SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder(); @@ -179,7 +184,9 @@ static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, fi Collection familyNames = regionFs.getFamilies(); if (familyNames != null) { for (String familyName : familyNames) { - Collection storeFiles = regionFs.getStoreFiles(familyName, false); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, htd, + htd.getColumnFamily(familyName.getBytes()), regionFs, false); + List storeFiles = getStoreFiles(sft, regionFs, familyName, false); if (storeFiles == null) { LOG.debug("No files under family: " + familyName); continue; @@ -210,4 +217,32 @@ static SnapshotRegionManifest 
buildManifestFromDisk(final Configuration conf, fi } return manifest.build(); } + + /** + * Returns the store files under the given family directory, resolved through the supplied + * {@link StoreFileTracker}, or null if no files are found. + */ + public static List getStoreFiles(StoreFileTracker sft, HRegionFileSystem regionFS, + String familyName, boolean validate) throws IOException { + Path familyDir = new Path(regionFS.getRegionDir(), familyName); + FileStatus[] files = CommonFSUtils.listStatus(regionFS.getFileSystem(), familyDir); + if (files == null) { + if (LOG.isTraceEnabled()) { + LOG.trace("No StoreFiles for: " + familyDir); + } + return null; + } + + ArrayList storeFiles = new ArrayList<>(files.length); + for (FileStatus status : files) { + if (validate && !StoreFileInfo.isValid(status)) { + // recovered.hfiles directory is expected inside CF path when hbase.wal.split.to.hfile is + // set to true, refer HBASE-23740 + if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) { + LOG.warn("Invalid StoreFile: {}", status.getPath()); + } + continue; + } + StoreFileInfo info = sft.getStoreFileInfo(status.getPath(), false); + storeFiles.add(info); + } + return storeFiles; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index 1e9d30a27883..28b99903aac4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint; @@ -123,12 +124,12 @@ public static boolean shouldReplayRecoveredEdits(HRegion region) { * archive after compaction */ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, - RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) - throws IOException { + RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path, + StoreFileTracker tracker) throws IOException { // if this is a primary region, just return the StoreFileInfo constructed from path if (RegionInfo.COMPARATOR.compare(regionInfo, regionInfoForFs) == 0) { - return new StoreFileInfo(conf, fs, path, true); + return tracker.getStoreFileInfo(path, true); } // else create a store file link. The link file does not exists on filesystem though.
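The ServerRegionReplicaUtil hunks around this point change the utility to take the tracker as an argument; a hedged sketch of the resulting call shape, where regionInfo, regionInfoForFs, familyName, path and tracker stand for values the caller already holds:

    StoreFileInfo sfi = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
      regionInfoForFs, familyName, path, tracker);
    // Primary replica: delegates to tracker.getStoreFileInfo(path, true).
    // Secondary replica: hfile links keep the existing HFileLink handling, while reference
    // files are now decoded via tracker.readReference(path), as the next hunk shows.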
@@ -137,7 +138,7 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, regionInfoForFs.getEncodedName(), familyName, path.getName()); return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link); } else if (StoreFileInfo.isReference(path)) { - Reference reference = Reference.read(fs, path); + Reference reference = tracker.readReference(path); Path referencePath = StoreFileInfo.getReferredToFile(path); if (HFileLink.isHFileLink(referencePath)) { // HFileLink Reference diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java index 31aded84109c..38d45f0f4653 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java @@ -28,8 +28,11 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -100,14 +103,15 @@ Set getStoresRequiringCompaction(Set requestedStores, long times boolean shouldCFBeCompacted(HRegionFileSystem fileSystem, String family, long ts) throws IOException { // do we have any store files? 
- Collection storeFiles = fileSystem.getStoreFiles(family); + StoreFileTracker sft = getStoreFileTracker(family, fileSystem); + List storeFiles = sft.load(); if (storeFiles == null) { LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem.getRegionInfo().getEncodedName(), " has no store files"); return false; } // check for reference files - if (fileSystem.hasReferences(family) && familyHasReferenceFile(fileSystem, family, ts)) { + if (sft.hasReferences() && familyHasReferenceFile(fileSystem, family, ts)) { LOG.info("Including store: " + family + " with: " + storeFiles.size() + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); return true; @@ -121,6 +125,13 @@ boolean shouldCFBeCompacted(HRegionFileSystem fileSystem, String family, long ts return includeStore; } + public StoreFileTracker getStoreFileTracker(String family, HRegionFileSystem fileSystem) + throws IOException { + TableDescriptor htd = connection.getTable(getRegion().getTable()).getDescriptor(); + return StoreFileTrackerFactory.create(connection.getConfiguration(), htd, + htd.getColumnFamily(family.getBytes()), fileSystem, false); + } + protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family, Collection storeFiles, long ts) throws IOException { diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp index b538cb7b6b4f..290dcd0342b4 100644 --- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp @@ -54,7 +54,7 @@ printer.setConf(conf); String[] options = {"-s"}; printer.parseOptions(options); - StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile), true); + StoreFileInfo sfi = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, new Path(storeFile), true); printer.processFile(sfi.getFileStatus().getPath(), true); String text = byteStream.toString();%> <%= diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java index 06373cda4023..8cc568c130b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java @@ -44,6 +44,9 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.StoreContext; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -509,7 +512,20 @@ public void testMergeRegion() throws Exception { Path tableDir = CommonFSUtils.getTableDir(rootDir, tableName); HRegionFileSystem regionFs = HRegionFileSystem .openRegionFromFileSystem(UTIL.getConfiguration(), fs, tableDir, mergedRegion, true); - return !regionFs.hasReferences(admin.getDescriptor(tableName)); + boolean references = false; + Path regionDir = new Path(tableDir, mergedRegion.getEncodedName()); + for (Path familyDir : 
FSUtils.getFamilyDirs(fs, regionDir)) { + StoreContext storeContext = StoreContext.getBuilder() + .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of(familyDir.getName())) + .withRegionFileSystem(regionFs).withFamilyStoreDirectoryPath(familyDir).build(); + StoreFileTracker sft = + StoreFileTrackerFactory.create(UTIL.getConfiguration(), false, storeContext); + references = references || sft.hasReferences(); + if (references) { + break; + } + } + return !references; } catch (IOException e) { LOG.warn("Failed check merged region has no reference", e); return false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 0ac03b8d4136..a999a4ac879c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -35,6 +36,10 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -42,8 +47,12 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -83,14 +92,18 @@ public static void tearDownAfterClass() throws Exception { * top of the file while we are at it. 
*/ @Test - public void testHalfScanAndReseek() throws IOException { + public void testHalfScanAndReseek() throws IOException, InterruptedException { Configuration conf = TEST_UTIL.getConfiguration(); FileSystem fs = FileSystem.get(conf); String root_dir = TEST_UTIL.getDataTestDir().toString(); Path parentPath = new Path(new Path(root_dir, "parent"), "CF"); fs.mkdirs(parentPath); - Path splitAPath = new Path(new Path(root_dir, "splita"), "CF"); - Path splitBPath = new Path(new Path(root_dir, "splitb"), "CF"); + String tableName = Paths.get(root_dir).getFileName().toString(); + RegionInfo splitAHri = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); + Thread.currentThread().sleep(1000); + RegionInfo splitBHri = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); + Path splitAPath = new Path(new Path(root_dir, splitAHri.getRegionNameAsString()), "CF"); + Path splitBPath = new Path(new Path(root_dir, splitBHri.getRegionNameAsString()), "CF"); Path filePath = StoreFileWriter.getUniqueFile(fs, parentPath); CacheConfig cacheConf = new CacheConfig(conf); @@ -112,12 +125,24 @@ public void testHalfScanAndReseek() throws IOException { Path splitFileA = new Path(splitAPath, filePath.getName() + ".parent"); Path splitFileB = new Path(splitBPath, filePath.getName() + ".parent"); + HRegionFileSystem splitAregionFS = + HRegionFileSystem.create(conf, fs, new Path(root_dir), splitAHri); + StoreContext splitAStoreContext = + StoreContext.getBuilder().withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("CF")) + .withFamilyStoreDirectoryPath(splitAPath).withRegionFileSystem(splitAregionFS).build(); + StoreFileTracker splitAsft = StoreFileTrackerFactory.create(conf, false, splitAStoreContext); Reference bottom = new Reference(midkey, Reference.Range.bottom); - bottom.write(fs, splitFileA); + splitAsft.createReference(bottom, splitFileA); doTestOfScanAndReseek(splitFileA, fs, bottom, cacheConf); + HRegionFileSystem splitBregionFS = + HRegionFileSystem.create(conf, fs, new Path(root_dir), splitBHri); + StoreContext splitBStoreContext = + StoreContext.getBuilder().withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("CF")) + .withFamilyStoreDirectoryPath(splitBPath).withRegionFileSystem(splitBregionFS).build(); + StoreFileTracker splitBsft = StoreFileTrackerFactory.create(conf, false, splitBStoreContext); Reference top = new Reference(midkey, Reference.Range.top); - top.write(fs, splitFileB); + splitBsft.createReference(top, splitFileB); doTestOfScanAndReseek(splitFileB, fs, top, cacheConf); r.close(); @@ -133,7 +158,8 @@ private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom, Cach new ReaderContextBuilder().withInputStreamWrapper(in).withFileSize(length) .withReaderType(ReaderContext.ReaderType.PREAD).withFileSystem(fs).withFilePath(p); ReaderContext context = contextBuilder.build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, p, true); + StoreFileInfo storeFileInfo = + new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, fs.getFileStatus(p), bottom); storeFileInfo.initHFileInfo(context); final HalfStoreFileReader halfreader = (HalfStoreFileReader) storeFileInfo.createReader(context, cacheConf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBytesReadFromFs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBytesReadFromFs.java index 7e7b4cb5c37c..d90a48a4be98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBytesReadFromFs.java 
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBytesReadFromFs.java @@ -331,7 +331,7 @@ private void readBloomFilters(Path path, BloomType bt, byte[] key, KeyValue keyV readLoadOnOpenDataSection(path, true); CacheConfig cacheConf = new CacheConfig(conf); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, path, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, path, true); HStoreFile sf = new HStoreFile(storeFileInfo, bt, cacheConf); // Read HFile trailer and load-on-open data section diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index c9966745d717..8bbb14fd966b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -69,9 +69,12 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.PrefetchExecutorNotifier; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.TestHStoreFile; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.trace.TraceUtil; @@ -383,11 +386,14 @@ private void testPrefetchWhenRefs(boolean compactionEnabled, Consumer Path storeFile = fileWithSplitPoint.getFirst(); HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, region); - HStoreFile file = new HStoreFile(fs, storeFile, conf, cacheConf, BloomType.NONE, true); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, + StoreContext.getBuilder().withFamilyStoreDirectoryPath(new Path(regionDir, "cf")) + .withRegionFileSystem(regionFS).build()); + HStoreFile file = new HStoreFile(fs, storeFile, conf, cacheConf, BloomType.NONE, true, sft); Path ref = regionFS.splitStoreFile(region, "cf", file, fileWithSplitPoint.getSecond(), false, - new ConstantSizeRegionSplitPolicy()); + new ConstantSizeRegionSplitPolicy(), sft); conf.setBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, compactionEnabled); - HStoreFile refHsf = new HStoreFile(this.fs, ref, conf, cacheConf, BloomType.NONE, true); + HStoreFile refHsf = new HStoreFile(this.fs, ref, conf, cacheConf, BloomType.NONE, true, sft); refHsf.initReader(); HFile.Reader reader = refHsf.getReader().getHFileReader(); while (!reader.prefetchComplete()) { @@ -424,13 +430,21 @@ private void testPrefetchWhenHFileLink(Consumer test) throws Exceptio Bytes.toBytes("testPrefetchWhenHFileLink")); Path storeFilePath = regionFs.commitStoreFile("cf", writer.getPath()); - Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", "cf")); + final RegionInfo dstHri = + RegionInfoBuilder.newBuilder(TableName.valueOf("testPrefetchWhenHFileLink")).build(); + HRegionFileSystem dstRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, + CommonFSUtils.getTableDir(testDir, dstHri.getTable()), dstHri); + Path dstPath = new Path(regionFs.getTableDir(), new 
Path(dstHri.getRegionNameAsString(), "cf")); HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName()); Path linkFilePath = new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName())); + StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, false, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(dstRegionFs.getRegionDir(), "cf")) + .withRegionFileSystem(dstRegionFs).build()); // Try to open store file from link - StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath, true); + StoreFileInfo storeFileInfo = sft.getStoreFileInfo(linkFilePath, true); HStoreFile hsf = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); assertTrue(storeFileInfo.isLink()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index 1e572e8c55e0..15fc42656ad4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -54,7 +54,10 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -163,7 +166,9 @@ public void testPrefetchRefsAfterSplit() throws Exception { HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, region); Path storeFile = writeStoreFile(100, cfDir); - + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, + StoreContext.getBuilder().withRegionFileSystem(regionFS).withFamilyStoreDirectoryPath(cfDir) + .withCacheConfig(cacheConf).build()); // Prefetches the file blocks LOG.debug("First read should prefetch the blocks."); readStoreFile(storeFile); @@ -174,10 +179,10 @@ public void testPrefetchRefsAfterSplit() throws Exception { // split the file and return references to the original file Random rand = ThreadLocalRandom.current(); byte[] splitPoint = RandomKeyValueUtil.randomOrderedKey(rand, 50); - HStoreFile file = new HStoreFile(fs, storeFile, conf, cacheConf, BloomType.NONE, true); + HStoreFile file = new HStoreFile(fs, storeFile, conf, cacheConf, BloomType.NONE, true, sft); Path ref = regionFS.splitStoreFile(region, "cf", file, splitPoint, false, - new ConstantSizeRegionSplitPolicy()); - HStoreFile refHsf = new HStoreFile(this.fs, ref, conf, cacheConf, BloomType.NONE, true); + new ConstantSizeRegionSplitPolicy(), sft); + HStoreFile refHsf = new HStoreFile(this.fs, ref, conf, cacheConf, BloomType.NONE, true, sft); // starts reader for the ref. The ref should resolve to the original file blocks // and not duplicate blocks in the cache. 
refHsf.initReader(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java index c2e24d1f569d..d713727460c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MetaMockingUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -60,6 +61,9 @@ import org.apache.hadoop.hbase.regionserver.ChunkCreator; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.MemStoreLAB; +import org.apache.hadoop.hbase.regionserver.StoreContext; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -134,8 +138,10 @@ public void testCleanMerge() throws IOException { Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, merged, td.getColumnFamilies()[0].getName()); - Path parentaRef = createMergeReferenceFile(storedir, merged, parenta); - Path parentbRef = createMergeReferenceFile(storedir, merged, parentb); + Path parentaRef = + createMergeReferenceFile(storedir, tabledir, td.getColumnFamilies()[0], merged, parenta); + Path parentbRef = + createMergeReferenceFile(storedir, tabledir, td.getColumnFamilies()[0], merged, parentb); // references exist, should not clean assertFalse(CatalogJanitor.cleanMergeRegion(masterServices, merged, parents)); @@ -170,7 +176,7 @@ public void testDontCleanMergeIfFileSystemException() throws IOException { Path tabledir = CommonFSUtils.getTableDir(rootdir, td.getTableName()); Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, merged, td.getColumnFamilies()[0].getName()); - createMergeReferenceFile(storedir, merged, parenta); + createMergeReferenceFile(storedir, tabledir, td.getColumnFamilies()[0], merged, parenta); MasterServices mockedMasterServices = spy(masterServices); MasterFileSystem mockedMasterFileSystem = spy(masterServices.getMasterFileSystem()); @@ -198,14 +204,22 @@ public void testDontCleanMergeIfFileSystemException() throws IOException { assertFalse(CatalogJanitor.cleanMergeRegion(mockedMasterServices, merged, parents)); } - private Path createMergeReferenceFile(Path storeDir, HRegionInfo mergedRegion, - HRegionInfo parentRegion) throws IOException { + private Path createMergeReferenceFile(Path storeDir, Path tableDir, + ColumnFamilyDescriptor columnFamilyDescriptor, RegionInfo mergedRegion, RegionInfo parentRegion) + throws IOException { Reference ref = Reference.createTopReference(mergedRegion.getStartKey()); long now = EnvironmentEdgeManager.currentTime(); // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storeDir, Long.toString(now) + "." 
+ parentRegion.getEncodedName()); FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem(); - return ref.write(fs, p); + HRegionFileSystem mergedRegionFS = + HRegionFileSystem.create(fs.getConf(), fs, tableDir, mergedRegion); + StoreContext storeContext = + StoreContext.getBuilder().withColumnFamilyDescriptor(columnFamilyDescriptor) + .withFamilyStoreDirectoryPath(storeDir).withRegionFileSystem(mergedRegionFS).build(); + StoreFileTracker sft = StoreFileTrackerFactory.create(fs.getConf(), false, storeContext); + sft.createReference(ref, p); + return p; } /** @@ -234,9 +248,16 @@ public void testCleanParent() throws IOException, InterruptedException { // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName()); FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem(); - Path path = ref.write(fs, p); - assertTrue(fs.exists(path)); - LOG.info("Created reference " + path); + HRegionFileSystem regionFS = + HRegionFileSystem.create(this.masterServices.getConfiguration(), fs, tabledir, splita); + StoreContext storeContext = + StoreContext.getBuilder().withColumnFamilyDescriptor(td.getColumnFamilies()[0]) + .withFamilyStoreDirectoryPath(storedir).withRegionFileSystem(regionFS).build(); + StoreFileTracker sft = + StoreFileTrackerFactory.create(this.masterServices.getConfiguration(), false, storeContext); + sft.createReference(ref, p); + assertTrue(fs.exists(p)); + LOG.info("Created reference " + p); // Add a parentdir for kicks so can check it gets removed by the catalogjanitor. fs.mkdirs(parentdir); assertFalse(CatalogJanitor.cleanParent(masterServices, parent, r)); @@ -704,7 +725,14 @@ private Path createReferences(final MasterServices services, final TableDescript // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." 
+ parent.getEncodedName()); FileSystem fs = services.getMasterFileSystem().getFileSystem(); - ref.write(fs, p); + HRegionFileSystem regionFS = + HRegionFileSystem.create(services.getConfiguration(), fs, tabledir, daughter); + StoreContext storeContext = + StoreContext.getBuilder().withColumnFamilyDescriptor(td.getColumnFamilies()[0]) + .withFamilyStoreDirectoryPath(storedir).withRegionFileSystem(regionFS).build(); + StoreFileTracker sft = + StoreFileTrackerFactory.create(services.getConfiguration(), false, storeContext); + sft.createReference(ref, p); return p; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java index 9c45a62aed34..eee9fd0d5fc9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java @@ -30,6 +30,9 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -69,8 +72,11 @@ public void testOpenClose() throws Exception { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(testDir) .withFileContext(meta).build(); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); MobTestUtil.writeStoreFile(writer, caseName); - CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf); + CachedMobFile cachedMobFile = + new CachedMobFile(new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf)); assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount()); cachedMobFile.open(); assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount()); @@ -93,12 +99,18 @@ public void testCompare() throws Exception { StoreFileWriter writer1 = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(outputDir1).withFileContext(meta).build(); MobTestUtil.writeStoreFile(writer1, caseName); - CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf); + StoreFileInfo storeFileInfo1 = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer1.getPath(), true); + CachedMobFile cachedMobFile1 = + new CachedMobFile(new HStoreFile(storeFileInfo1, BloomType.NONE, cacheConf)); Path outputDir2 = new Path(testDir, FAMILY2); StoreFileWriter writer2 = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(outputDir2).withFileContext(meta).build(); MobTestUtil.writeStoreFile(writer2, caseName); - CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf); + StoreFileInfo storeFileInfo2 = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer2.getPath(), true); + CachedMobFile cachedMobFile2 = + new CachedMobFile(new HStoreFile(storeFileInfo2, BloomType.NONE, cacheConf)); cachedMobFile1.access(1); cachedMobFile2.access(2); assertEquals(1, cachedMobFile1.compareTo(cachedMobFile2)); @@ -115,7 +127,10 @@ public void testReadKeyValue() 
throws Exception { .withFileContext(meta).build(); String caseName = testName.getMethodName(); MobTestUtil.writeStoreFile(writer, caseName); - CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + CachedMobFile cachedMobFile = + new CachedMobFile(new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf)); byte[] family = Bytes.toBytes(caseName); byte[] qualify = Bytes.toBytes(caseName); // Test the start key diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java index 6aeab33893f2..a59b839a806f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.ToolRunner; import org.junit.After; @@ -42,6 +43,8 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Category(MediumTests.class) public class TestExpiredMobFileCleaner { @@ -57,6 +60,7 @@ public class TestExpiredMobFileCleaner { private final static byte[] row2 = Bytes.toBytes("row2"); private final static byte[] row3 = Bytes.toBytes("row3"); private final static byte[] qf = Bytes.toBytes("qf"); + private static final Logger LOG = LoggerFactory.getLogger(TestExpiredMobFileCleaner.class); private static BufferedMutator table; private static Admin admin; @@ -136,6 +140,9 @@ public void testCleaner() throws Exception { byte[] dummyData = makeDummyData(600); long ts = EnvironmentEdgeManager.currentTime() - 3 * secondsOfDay() * 1000; // 3 days before putKVAndFlush(table, row1, dummyData, ts); + LOG.info("test log to be deleted, tablename is " + tableName); + CommonFSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), + TEST_UTIL.getDefaultRootDirPath(), LOG); FileStatus[] firstFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); // the first mob file assertEquals("Before cleanup without delay 1", 1, firstFiles.length); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java index 5e593c8c5e1a..ac7645bf1d9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -67,8 +68,9 @@ public void testReadKeyValue() throws Exception { String caseName = testName.getMethodName(); MobTestUtil.writeStoreFile(writer, caseName); - MobFile mobFile = - new MobFile(new HStoreFile(fs, 
writer.getPath(), conf, cacheConf, BloomType.NONE, true)); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + MobFile mobFile = new MobFile(new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf)); byte[] family = Bytes.toBytes(caseName); byte[] qualify = Bytes.toBytes(caseName); @@ -116,8 +118,9 @@ public void testGetScanner() throws Exception { .withFileContext(meta).build(); MobTestUtil.writeStoreFile(writer, testName.getMethodName()); - MobFile mobFile = - new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true)); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + MobFile mobFile = new MobFile(new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf)); assertNotNull(mobFile.getScanner()); assertTrue(mobFile.getScanner() instanceof StoreFileScanner); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java index 069fc48322bb..773203300a8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java @@ -26,21 +26,24 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.HMobStore; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -116,37 +119,21 @@ public void tearDown() throws Exception { /** * Create the mob store file. 
*/ - private Path createMobStoreFile(String family) throws IOException { - return createMobStoreFile(HBaseConfiguration.create(), family); - } - - /** - * Create the mob store file - */ - private Path createMobStoreFile(Configuration conf, String family) throws IOException { - HColumnDescriptor hcd = new HColumnDescriptor(family); - hcd.setMaxVersions(4); - hcd.setMobEnabled(true); - return createMobStoreFile(hcd); - } - - /** - * Create the mob store file - */ - private Path createMobStoreFile(HColumnDescriptor hcd) throws IOException { + private Pair createAndGetMobStoreFileContextPair(String family) + throws IOException { + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(family)).setMaxVersions(4).setMobEnabled(true).build(); // Setting up a Store TableName tn = TableName.valueOf(TABLE); - HTableDescriptor htd = new HTableDescriptor(tn); - htd.addFamily(hcd); - HMobStore mobStore = (HMobStore) region.getStore(hcd.getName()); - KeyValue key1 = new KeyValue(ROW, hcd.getName(), QF1, 1, VALUE); - KeyValue key2 = new KeyValue(ROW, hcd.getName(), QF2, 1, VALUE); - KeyValue key3 = new KeyValue(ROW2, hcd.getName(), QF3, 1, VALUE2); + HMobStore mobStore = (HMobStore) region.getStore(columnFamilyDescriptor.getName()); + KeyValue key1 = new KeyValue(ROW, columnFamilyDescriptor.getName(), QF1, 1, VALUE); + KeyValue key2 = new KeyValue(ROW, columnFamilyDescriptor.getName(), QF2, 1, VALUE); + KeyValue key3 = new KeyValue(ROW2, columnFamilyDescriptor.getName(), QF3, 1, VALUE2); KeyValue[] keys = new KeyValue[] { key1, key2, key3 }; int maxKeyCount = keys.length; HRegionInfo regionInfo = new HRegionInfo(tn); StoreFileWriter mobWriter = mobStore.createWriterInTmp(currentDate, maxKeyCount, - hcd.getCompactionCompression(), regionInfo.getStartKey(), false); + columnFamilyDescriptor.getCompactionCompressionType(), regionInfo.getStartKey(), false); Path mobFilePath = mobWriter.getPath(); String fileName = mobFilePath.getName(); mobWriter.append(key1); @@ -156,21 +143,26 @@ private Path createMobStoreFile(HColumnDescriptor hcd) throws IOException { String targetPathName = MobUtils.formatDate(currentDate); Path targetPath = new Path(mobStore.getPath(), targetPathName); mobStore.commitFile(mobFilePath, targetPath); - return new Path(targetPath, fileName); + return new Pair(new Path(targetPath, fileName), mobStore.getStoreContext()); } @Test public void testMobFileCache() throws Exception { FileSystem fs = FileSystem.get(conf); - Path file1Path = createMobStoreFile(FAMILY1); - Path file2Path = createMobStoreFile(FAMILY2); - Path file3Path = createMobStoreFile(FAMILY3); + Pair fileAndContextPair1 = createAndGetMobStoreFileContextPair(FAMILY1); + Pair fileAndContextPair2 = createAndGetMobStoreFileContextPair(FAMILY2); + Pair fileAndContextPair3 = createAndGetMobStoreFileContextPair(FAMILY3); + + Path file1Path = fileAndContextPair1.getFirst(); + Path file2Path = fileAndContextPair2.getFirst(); + Path file3Path = fileAndContextPair3.getFirst(); CacheConfig cacheConf = new CacheConfig(conf); // Before open one file by the MobFileCache assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize()); // Open one file by the MobFileCache - CachedMobFile cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(fs, file1Path, cacheConf); + CachedMobFile cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(fs, file1Path, cacheConf, + fileAndContextPair1.getSecond()); assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize()); 
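A minimal sketch of the updated cache interaction exercised above, assuming the Pair of mob file path and StoreContext returned by the test's createAndGetMobStoreFileContextPair helper; MobFileCache.openFile now takes the owning store's StoreContext so the file can be resolved through that store's StoreFileTracker:

// Sketch only; conf, mobFileCache, FAMILY1 and the helper method are assumed from the test fixture above.
FileSystem fs = FileSystem.get(conf);
CacheConfig cacheConf = new CacheConfig(conf);
Pair<Path, StoreContext> fileAndContext = createAndGetMobStoreFileContextPair(FAMILY1);
// The StoreContext is the new fourth argument to openFile.
CachedMobFile cached = (CachedMobFile) mobFileCache.openFile(fs, fileAndContext.getFirst(),
  cacheConf, fileAndContext.getSecond());
cached.close(); // release the reference taken by openFile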
assertNotNull(cachedMobFile1); assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount()); @@ -189,11 +181,14 @@ public void testMobFileCache() throws Exception { cachedMobFile1.close(); // Close the cached mob file // Reopen three cached file - cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(fs, file1Path, cacheConf); + cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(fs, file1Path, cacheConf, + fileAndContextPair1.getSecond()); assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize()); - CachedMobFile cachedMobFile2 = (CachedMobFile) mobFileCache.openFile(fs, file2Path, cacheConf); + CachedMobFile cachedMobFile2 = (CachedMobFile) mobFileCache.openFile(fs, file2Path, cacheConf, + fileAndContextPair2.getSecond()); assertEquals(EXPECTED_CACHE_SIZE_TWO, mobFileCache.getCacheSize()); - CachedMobFile cachedMobFile3 = (CachedMobFile) mobFileCache.openFile(fs, file3Path, cacheConf); + CachedMobFile cachedMobFile3 = (CachedMobFile) mobFileCache.openFile(fs, file3Path, cacheConf, + fileAndContextPair3.getSecond()); // Before the evict // Evict the cache, should close the first file 1 assertEquals(EXPECTED_CACHE_SIZE_THREE, mobFileCache.getCacheSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java index 123965c0eca2..bdfb42a14970 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java @@ -62,12 +62,15 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.RegionAsTable; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.security.User; @@ -329,9 +332,17 @@ private long countMobCellsInMetadata() throws IOException { copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f); CacheConfig cacheConfig = new CacheConfig(copyOfConf); if (fs.exists(mobDirPath)) { + // TODO: use sft.load() api here + HRegionFileSystem regionFs = HRegionFileSystem.create(copyOfConf, fs, + MobUtils.getMobTableDir(copyOfConf, htd.getTableName()), region.getRegionInfo()); + StoreFileTracker sft = StoreFileTrackerFactory.create(copyOfConf, false, + StoreContext.getBuilder().withColumnFamilyDescriptor(hcd) + .withFamilyStoreDirectoryPath(mobDirPath).withCacheConfig(cacheConfig) + .withRegionFileSystem(regionFs).build()); FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath); for (FileStatus file : files) { - HStoreFile sf = new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true); + HStoreFile sf = + new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, 
true, sft); sf.initReader(); Map fileInfo = sf.getReader().loadFileInfo(); byte[] count = fileInfo.get(MOB_CELLS_COUNT); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java index 53a9de7f71b5..48a4e7b2984c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java @@ -26,6 +26,7 @@ import java.util.Map; import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; @@ -46,9 +47,11 @@ protected ArrayList sfCreate(long[] minTimestamps, long[] maxTimesta } ArrayList ret = Lists.newArrayList(); + StoreFileTrackerForTest storeFileTrackerForTest = + new StoreFileTrackerForTest(conf, true, store.getStoreContext()); for (int i = 0; i < sizes.length; i++) { - MockHStoreFile msf = - new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), false, i); + MockHStoreFile msf = new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), + false, i, storeFileTrackerForTest); msf.setTimeRangeTracker( TimeRangeTracker.create(TimeRangeTracker.Type.SYNC, minTimestamps[i], maxTimestamps[i])); ret.add(msf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index ec9de92e9f25..d7a467365477 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -582,7 +582,8 @@ public static void testCodecs(Configuration conf, int kvLimit, String hfilePath, Path path = new Path(hfilePath); CacheConfig cacheConf = new CacheConfig(conf); FileSystem fs = FileSystem.get(conf); - HStoreFile hsf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, path, true); + HStoreFile hsf = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); hsf.initReader(); StoreFileReader reader = hsf.getReader(); reader.loadFileInfo(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index fbd6286c5a94..de9494c580a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -58,8 +58,9 @@ private List prepareListOfTestSeeks(Path path) throws IOException { List allKeyValues = new ArrayList<>(); // read all of the key values - HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), path, configuration, - cacheConf, BloomType.NONE, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(configuration, + testingUtility.getTestFileSystem(), path, 
true); + HStoreFile storeFile = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); storeFile.initReader(); StoreFileReader reader = storeFile.getReader(); StoreFileScanner scanner = reader.getStoreFileScanner(true, false, false, 0, 0, false); @@ -87,8 +88,9 @@ private List prepareListOfTestSeeks(Path path) throws IOException { private void runTest(Path path, DataBlockEncoding blockEncoding, List seeks) throws IOException { // read all of the key values - HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), path, configuration, - cacheConf, BloomType.NONE, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(configuration, + testingUtility.getTestFileSystem(), path, true); + HStoreFile storeFile = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); storeFile.initReader(); long totalSize = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java index b31c738149f9..4f4717903087 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.net.UnknownHostException; import java.util.Arrays; import java.util.Map; import java.util.Optional; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DNS; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -52,9 +54,13 @@ public class MockHStoreFile extends HStoreFile { boolean compactedAway; MockHStoreFile(HBaseTestingUtility testUtil, Path testPath, long length, long ageInDisk, - boolean isRef, long sequenceid) throws IOException { - super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(), - new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true); + boolean isRef, long sequenceid, StoreFileInfo storeFileInfo) throws IOException { + super(storeFileInfo, BloomType.NONE, new CacheConfig(testUtil.getConfiguration())); + setMockHStoreFileVals(length, isRef, ageInDisk, sequenceid, isMajor, testUtil); + } + + private void setMockHStoreFileVals(long length, boolean isRef, long ageInDisk, long sequenceid, + boolean isMajor, HBaseTestingUtility testUtil) throws UnknownHostException { this.length = length; this.isRef = isRef; this.ageInDisk = ageInDisk; @@ -67,6 +73,13 @@ public class MockHStoreFile extends HStoreFile { modificationTime = EnvironmentEdgeManager.currentTime(); } + MockHStoreFile(HBaseTestingUtility testUtil, Path testPath, long length, long ageInDisk, + boolean isRef, long sequenceid, StoreFileTracker tracker) throws IOException { + super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(), + new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true, tracker); + setMockHStoreFileVals(length, isRef, ageInDisk, sequenceid, isMajor, testUtil); + } + void setLength(long newLen) { this.length = newLen; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 22b02b7cf825..6ac3c4e07d23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -219,7 +219,8 @@ public void testCacheOnWriteInSchema() throws IOException { private void readStoreFile(Path path) throws IOException { CacheConfig cacheConf = store.getCacheConfig(); BlockCache cache = cacheConf.getBlockCache().get(); - HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, path, true); + HStoreFile sf = new HStoreFile(storeFileInfo, BloomType.ROWCOL, cacheConf); sf.initReader(); HFile.Reader reader = sf.getReader().getHFileReader(); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java index 97b62e9d987c..706b98aeef7c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -155,7 +156,10 @@ public void testRemoveCompactedFilesWithException() throws Exception { out.writeInt(1); out.close(); - HStoreFile errStoreFile = new MockHStoreFile(testUtil, errFile, 1, 0, false, 1); + StoreFileTrackerForTest storeFileTrackerForTest = + new StoreFileTrackerForTest(store.getReadOnlyConfiguration(), true, store.getStoreContext()); + HStoreFile errStoreFile = + new MockHStoreFile(testUtil, errFile, 1, 0, false, 1, storeFileTrackerForTest); fileManager.addCompactionResults(ImmutableList.of(errStoreFile), ImmutableList.of()); // cleanup compacted files diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java index dcd900ec33a7..5d764df9eb90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -162,9 +163,11 @@ List sfCreate(boolean isReference, long... 
sizes) throws IOException List sfCreate(boolean isReference, ArrayList sizes, ArrayList ageInDisk) throws IOException { List ret = Lists.newArrayList(); + StoreFileTrackerForTest storeFileTrackerForTest = + new StoreFileTrackerForTest(conf, true, store.getStoreContext()); for (int i = 0; i < sizes.size(); i++) { - ret.add( - new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference, i)); + ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference, + i, storeFileTrackerForTest)); } return ret; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index 87f9af25e2c7..6f708d26f3a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -197,7 +197,8 @@ private void validateFalsePosRate(double falsePosRate, int nTrials, double zValu private void readStoreFile(int t, BloomType bt, List kvs, Path sfPath) throws IOException { - HStoreFile sf = new HStoreFile(fs, sfPath, conf, cacheConf, bt, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, sfPath, true); + HStoreFile sf = new HStoreFile(storeFileInfo, bt, cacheConf); sf.initReader(); StoreFileReader r = sf.getReader(); final boolean pread = true; // does not really matter diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java index 0771d41bb433..a6caff227598 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java @@ -62,6 +62,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -800,7 +802,10 @@ private static HStoreFile createHStoreFile(Path storeDir, Configuration conf, lo writeStoreFileRandomData(storeFileWriter, Bytes.toBytes(columnFamily), timestamp); - return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true); + StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFs).build(); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, storeContext); + return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true, + sft); } private static Configuration getConfWithCustomCellDataTieringEnabled(long hotDataAge) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java index b2c363bf53ac..3796e3e5b8db 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java @@ -32,9 +32,11 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.compactions.CustomDateTieredCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -65,7 +67,13 @@ private HStoreFile createFile(RegionInfo regionInfo, Path file, long minValue, l FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); HRegionFileSystem regionFileSystem = new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, file, regionInfo); - MockHStoreFile msf = new MockHStoreFile(TEST_UTIL, file, size, ageInDisk, false, (long) seqId); + StoreContext ctx = new StoreContext.Builder() + .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()) + .withRegionFileSystem(regionFileSystem).build(); + StoreFileTrackerForTest sftForTest = + new StoreFileTrackerForTest(TEST_UTIL.getConfiguration(), true, ctx); + MockHStoreFile msf = + new MockHStoreFile(TEST_UTIL, file, size, ageInDisk, false, (long) seqId, sftForTest); TimeRangeTracker timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC); timeRangeTracker.setMin(minValue); timeRangeTracker.setMax(maxValue); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java index 585482c94093..507f14a86946 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java @@ -62,6 +62,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -816,7 +818,11 @@ static HStoreFile createHStoreFile(Path storeDir, Configuration conf, long times writeStoreFileRandomData(storeFileWriter, Bytes.toBytes(columnFamily), timestamp); - return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true); + StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFs).build(); + + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, storeContext); + return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true, + sft); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java index 309b18288669..795262a8486d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -34,6 +35,8 @@ import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -84,8 +87,11 @@ public void testSplitStoreDir() throws Exception { .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) .build(); HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; + StoreFileTracker sft = + StoreFileTrackerFactory.create(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), + true, region.getStores().get(0).getStoreContext()); Path result = regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), false, region.getSplitPolicy()); + Bytes.toBytes("002"), false, region.getSplitPolicy(), sft); // asserts the reference file naming is correct validateResultingFile(region.getRegionInfo().getEncodedName(), result); // Additionally check if split region dir was created directly under table dir, not on .tmp @@ -113,16 +119,18 @@ public void testMergeStoreFile() throws Exception { .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) .build(); - HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem( - TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionFS.getFileSystem(), - regionFS.getTableDir(), mergeResult); + Configuration configuration = TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(); + HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(configuration, + regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult); // merge file from first region HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; - mergeFileFromRegion(mergeRegionFs, first, file); + mergeFileFromRegion(mergeRegionFs, first, file, StoreFileTrackerFactory.create(configuration, + true, first.getStore(FAMILY_NAME).getStoreContext())); // merge file from second region file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; - mergeFileFromRegion(mergeRegionFs, second, file); + mergeFileFromRegion(mergeRegionFs, second, file, StoreFileTrackerFactory.create(configuration, + true, second.getStore(FAMILY_NAME).getStoreContext())); } @Test @@ -163,11 +171,14 @@ public void testCommitDaughterRegionWithFiles() throws Exception { Path splitDirB = regionFS.getSplitsDir(daughterB); HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; List filesA 
= new ArrayList<>(); + StoreFileTracker sft = + StoreFileTrackerFactory.create(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), + true, region.getStores().get(0).getStoreContext()); filesA.add(regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), false, region.getSplitPolicy())); + Bytes.toBytes("002"), false, region.getSplitPolicy(), sft)); List filesB = new ArrayList<>(); filesB.add(regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), true, region.getSplitPolicy())); + Bytes.toBytes("002"), true, region.getSplitPolicy(), sft)); MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); Path resultA = regionFS.commitDaughterRegion(daughterA, filesA, env); @@ -196,17 +207,19 @@ public void testCommitMergedRegion() throws Exception { .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) .build(); - HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem( - TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionFS.getFileSystem(), - regionFS.getTableDir(), mergeResult); + Configuration configuration = TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(); + HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(configuration, + regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult); // merge file from first region HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; - mergeFileFromRegion(mergeRegionFs, first, file); + mergeFileFromRegion(mergeRegionFs, first, file, StoreFileTrackerFactory.create(configuration, + true, first.getStore(FAMILY_NAME).getStoreContext())); // merge file from second region file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; List mergedFiles = new ArrayList<>(); - mergedFiles.add(mergeFileFromRegion(mergeRegionFs, second, file)); + mergedFiles.add(mergeFileFromRegion(mergeRegionFs, second, file, StoreFileTrackerFactory + .create(configuration, true, second.getStore(FAMILY_NAME).getStoreContext()))); MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); mergeRegionFs.commitMergedRegion(mergedFiles, env); @@ -229,9 +242,9 @@ private void waitForSplitProcComplete(int attempts, int waitTime) throws Excepti } private Path mergeFileFromRegion(HRegionFileSystem regionFS, HRegion regionToMerge, - HStoreFile file) throws IOException { - Path mergedFile = - regionFS.mergeStoreFile(regionToMerge.getRegionInfo(), Bytes.toString(FAMILY_NAME), file); + HStoreFile file, StoreFileTracker sft) throws IOException { + Path mergedFile = regionFS.mergeStoreFile(regionToMerge.getRegionInfo(), + Bytes.toString(FAMILY_NAME), file, sft); validateResultingFile(regionToMerge.getRegionInfo().getEncodedName(), mergedFile); return mergedFile; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index b94025590430..a0886123c16d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -94,8 +94,9 @@ public void testHFileScannerThrowsErrors() throws IOException { .withOutputDir(hfilePath).withFileContext(meta).build(); 
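The split and merge helpers above now take a StoreFileTracker, so the reference files they create are registered through the tracker instead of being written to the filesystem directly. A minimal sketch of the calling pattern, assuming the region, regionFS and daughterA fixtures from TestDirectStoreSplitsMerges above:

// Sketch only; TEST_UTIL, region, regionFS, daughterA and FAMILY_NAME are assumed from the test above.
Configuration conf = TEST_UTIL.getHBaseCluster().getMaster().getConfiguration();
StoreFileTracker sft =
  StoreFileTrackerFactory.create(conf, true, region.getStore(FAMILY_NAME).getStoreContext());
HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
// The tracker is now the trailing argument of splitStoreFile (and likewise of mergeStoreFile).
Path ref = regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file,
  Bytes.toBytes("002"), false, region.getSplitPolicy(), sft);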
TestHStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); - HStoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, - BloomType.NONE, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(util.getConfiguration(), + fs, writer.getPath(), true); + HStoreFile sf = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); sf.initReader(); StoreFileReader reader = sf.getReader(); HFileScanner scanner = reader.getScanner(false, true); @@ -139,8 +140,9 @@ public void testStoreFileScannerThrowsErrors() throws IOException { .withOutputDir(hfilePath).withFileContext(meta).build(); TestHStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); - HStoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, - BloomType.NONE, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(util.getConfiguration(), + fs, writer.getPath(), true); + HStoreFile sf = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); List scanners = StoreFileScanner.getScannersForStoreFiles( Collections.singletonList(sf), false, true, false, false, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 059935084066..d9a735144480 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -150,6 +150,8 @@ import org.apache.hadoop.hbase.regionserver.Region.RowLock; import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; @@ -5687,8 +5689,14 @@ public void testCompactionFromPrimary() throws IOException { // move the file of the primary region to the archive, simulating a compaction Collection storeFiles = primaryRegion.getStore(families[0]).getStorefiles(); primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles); - Collection storeFileInfos = - primaryRegion.getRegionFileSystem().getStoreFiles(Bytes.toString(families[0])); + HRegionFileSystem regionFs = primaryRegion.getRegionFileSystem(); + StoreFileTracker sft = StoreFileTrackerFactory.create(primaryRegion.getBaseConf(), false, + StoreContext.getBuilder() + .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.newBuilder(families[0]).build()) + .withFamilyStoreDirectoryPath( + new Path(regionFs.getRegionDir(), Bytes.toString(families[0]))) + .withRegionFileSystem(regionFs).build()); + Collection storeFileInfos = sft.load(); Assert.assertTrue(storeFileInfos == null || storeFileInfos.isEmpty()); verifyData(secondaryRegion, 0, 1000, cq, families); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 49c2f83e8b3c..7f8e3e71afb1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -25,7 +25,6 @@ import java.io.IOException; import java.net.URI; -import java.util.Collection; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -39,11 +38,14 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -359,20 +361,25 @@ public void testTempAndCommit() throws IOException { ; RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); - + StoreContext storeContext = StoreContext.getBuilder() + .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of(familyName)) + .withFamilyStoreDirectoryPath( + new Path(regionFs.getTableDir(), new Path(hri.getRegionNameAsString(), familyName))) + .withRegionFileSystem(regionFs).build(); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, storeContext); // New region, no store files - Collection storeFiles = regionFs.getStoreFiles(familyName); + List storeFiles = sft.load(); assertEquals(0, storeFiles != null ? storeFiles.size() : 0); // Create a new file in temp (no files in the family) Path buildPath = regionFs.createTempName(); fs.createNewFile(buildPath); - storeFiles = regionFs.getStoreFiles(familyName); + storeFiles = sft.load(); assertEquals(0, storeFiles != null ? storeFiles.size() : 0); // commit the file Path dstPath = regionFs.commitStoreFile(familyName, buildPath); - storeFiles = regionFs.getStoreFiles(familyName); + storeFiles = sft.load(); assertEquals(0, storeFiles != null ? 
storeFiles.size() : 0); assertFalse(fs.exists(buildPath)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index eca38d0bc239..5d856edadb01 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -121,6 +121,8 @@ import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.regionserver.compactions.EverythingPolicy; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; @@ -788,8 +790,8 @@ public Object run() throws Exception { LOG.info("Before flush, we should have no files"); - Collection files = - store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName()); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, store.getStoreContext()); + Collection files = sft.load(); assertEquals(0, files != null ? files.size() : 0); // flush @@ -802,7 +804,7 @@ public Object run() throws Exception { } LOG.info("After failed flush, we should still have no files!"); - files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName()); + files = sft.load(); assertEquals(0, files != null ? files.size() : 0); store.getHRegion().getWAL().close(); return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index b93e0472bd71..f21f5ccfcc46 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -82,6 +82,8 @@ import org.apache.hadoop.hbase.io.hfile.UncompressedBlockSizePredicator; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.BloomFilterFactory; @@ -163,8 +165,12 @@ public void testBasicHalfAndHFileLinkMapFile() throws Exception { writeStoreFile(writer); Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); - HStoreFile sf = new HStoreFile(this.fs, sfPath, conf, cacheConf, BloomType.NONE, true); - checkHalfHFile(regionFs, sf); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), TEST_FAMILY)) + .withRegionFileSystem(regionFs).build()); + HStoreFile sf = new HStoreFile(this.fs, sfPath, conf, cacheConf, BloomType.NONE, true, sft); + checkHalfHFile(regionFs, sf, sft); } private void writeStoreFile(final StoreFileWriter writer) throws IOException { @@ -228,7 +234,11 @@ public void testReference() throws 
IOException { writeStoreFile(writer); Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); - HStoreFile hsf = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), TEST_FAMILY)) + .withRegionFileSystem(regionFs).build()); + HStoreFile hsf = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true, sft); hsf.initReader(); StoreFileReader reader = hsf.getReader(); // Split on a row, not in middle of row. Midkey returned by reader @@ -239,9 +249,10 @@ public void testReference() throws IOException { hsf.closeStoreFile(true); // Make a reference - HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow); - Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true); - HStoreFile refHsf = new HStoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE, true); + RegionInfo splitHri = RegionInfoBuilder.newBuilder(hri.getTable()).setEndKey(midRow).build(); + Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true, sft); + HStoreFile refHsf = + new HStoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE, true, sft); refHsf.initReader(); // Now confirm that I can read from the reference and that it only gets // keys from top half of the file. @@ -274,8 +285,11 @@ public void testStoreFileReference() throws Exception { writeStoreFile(writer); Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); writer.close(); - - HStoreFile file = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true); + StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), TEST_FAMILY)) + .withRegionFileSystem(regionFs).build()); + HStoreFile file = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true, sft); file.initReader(); StoreFileReader r = file.getReader(); assertNotNull(r); @@ -312,6 +326,10 @@ public void testHFileLink() throws IOException { CommonFSUtils.setRootDir(testConf, testDir); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, CommonFSUtils.getTableDir(testDir, hri.getTable()), hri); + final RegionInfo dstHri = + RegionInfoBuilder.newBuilder(TableName.valueOf("testHFileLinkTb")).build(); + HRegionFileSystem dstRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, + CommonFSUtils.getTableDir(testDir, dstHri.getTable()), dstHri); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. 
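The store file tests above now build an HStoreFile in one of two ways: from a StoreFileInfo created with StoreFileInfo.createStoreFileInfoForHFile, or through the constructor that takes an explicit StoreFileTracker. A minimal sketch of both variants, assuming a committed hfile path and the region filesystem used in these tests:

// Sketch only; conf, fs, cacheConf, regionFs, TEST_FAMILY and hsfPath are assumed from the tests above.
// Variant 1: wrap the committed file in a StoreFileInfo first.
StoreFileInfo info = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, hsfPath, true);
HStoreFile byInfo = new HStoreFile(info, BloomType.NONE, cacheConf);
byInfo.initReader();
// Variant 2: pass a StoreFileTracker built from the store's context.
StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false,
  StoreContext.getBuilder()
    .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), TEST_FAMILY))
    .withRegionFileSystem(regionFs).build());
HStoreFile byTracker = new HStoreFile(fs, hsfPath, conf, cacheConf, BloomType.NONE, true, sft);
byTracker.initReader();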
@@ -320,13 +338,22 @@ public void testHFileLink() throws IOException { writeStoreFile(writer); Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); - Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY)); + Path dstPath = + new Path(regionFs.getTableDir(), new Path(dstHri.getRegionNameAsString(), TEST_FAMILY)); HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName()); Path linkFilePath = new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName())); // Try to open store file from link - StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath, true); + + // this should be the SFT for the destination link file path; it is not + // being used right now, but in a follow-up patch the file link creation logic will + // also move to the SFT interface. + StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, false, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(dstHri.getRegionNameAsString(), TEST_FAMILY)) + .withRegionFileSystem(dstRegionFs).build()); + StoreFileInfo storeFileInfo = sft.getStoreFileInfo(linkFilePath, true); HStoreFile hsf = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); assertTrue(storeFileInfo.isLink()); hsf.initReader(); @@ -341,6 +368,13 @@ public void testHFileLink() throws IOException { assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count); } + @Test + public void testsample() { + Path p1 = new Path("/r1/c1"); + Path p2 = new Path("f1"); + System.out.println(new Path(p1, p2).toString()); + } + /** * This test creates an hfile and then the dir structures and files to verify that references to * hfilelinks (created by snapshot clones) can be properly interpreted. @@ -375,12 +409,33 @@ public void testReferenceToHFileLink() throws IOException { // create splits of the link.
// /clone/splitA//, // /clone/splitB// - HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY); - HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null); - HStoreFile f = new HStoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE, true); + RegionInfo splitHriA = RegionInfoBuilder.newBuilder(hri.getTable()).setEndKey(SPLITKEY).build(); + RegionInfo splitHriB = + RegionInfoBuilder.newBuilder(hri.getTable()).setStartKey(SPLITKEY).build(); + + StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, true, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(hriClone.getRegionNameAsString(), TEST_FAMILY)) + .withRegionFileSystem(cloneRegionFs).build()); + + HRegionFileSystem splitRegionAFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, + CommonFSUtils.getTableDir(testDir, splitHriA.getTable()), splitHriA); + StoreFileTracker sftA = StoreFileTrackerFactory.create(testConf, true, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(splitHriA.getRegionNameAsString(), TEST_FAMILY)) + .withRegionFileSystem(splitRegionAFs).build()); + HRegionFileSystem splitRegionBFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, + CommonFSUtils.getTableDir(testDir, splitHriB.getTable()), splitHriB); + StoreFileTracker sftB = StoreFileTrackerFactory.create(testConf, true, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(splitHriB.getRegionNameAsString(), TEST_FAMILY)) + .withRegionFileSystem(splitRegionBFs).build()); + HStoreFile f = new HStoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE, true, sft); f.initReader(); - Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top - Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom + // top + Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true, sft); + // bottom + Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false, sft); f.closeStoreFile(true); // OK test the thing CommonFSUtils.logFileSystemState(fs, testDir, LOG); @@ -389,7 +444,8 @@ public void testReferenceToHFileLink() throws IOException { // reference to a hfile link. This code in StoreFile that handles this case. 
// Try to open store file from link - HStoreFile hsfA = new HStoreFile(this.fs, pathA, testConf, cacheConf, BloomType.NONE, true); + HStoreFile hsfA = + new HStoreFile(this.fs, pathA, testConf, cacheConf, BloomType.NONE, true, sftA); hsfA.initReader(); // Now confirm that I can read from the ref to link @@ -402,7 +458,8 @@ public void testReferenceToHFileLink() throws IOException { assertTrue(count > 0); // read some rows here // Try to open store file from link - HStoreFile hsfB = new HStoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE, true); + HStoreFile hsfB = + new HStoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE, true, sftB); hsfB.initReader(); // Now confirm that I can read from the ref to link @@ -419,8 +476,8 @@ public void testReferenceToHFileLink() throws IOException { assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count); } - private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f) - throws IOException { + private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f, + StoreFileTracker sft) throws IOException { f.initReader(); Cell midkey = f.getReader().midKey().get(); KeyValue midKV = (KeyValue) midkey; @@ -428,16 +485,19 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f // in the children byte[] midRow = CellUtil.cloneRow(midKV); // Create top split. - HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow); - Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true); + RegionInfo topHri = + RegionInfoBuilder.newBuilder(regionFs.getRegionInfo().getTable()).setEndKey(SPLITKEY).build(); + Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true, sft); // Create bottom split. - HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null); - Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false); + RegionInfo bottomHri = RegionInfoBuilder.newBuilder(regionFs.getRegionInfo().getTable()) + .setStartKey(SPLITKEY).build(); + Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false, sft); // Make readers on top and bottom. - HStoreFile topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true); + HStoreFile topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true, sft); topF.initReader(); StoreFileReader top = topF.getReader(); - HStoreFile bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true); + HStoreFile bottomF = + new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true, sft); bottomF.initReader(); StoreFileReader bottom = bottomF.getReader(); ByteBuffer previous = null; @@ -493,12 +553,12 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f // properly. 
byte[] badmidkey = Bytes.toBytes(" ."); assertTrue(fs.exists(f.getPath())); - topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true); - bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false); + topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true, sft); + bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false, sft); assertNull(bottomPath); - topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true); + topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true, sft); topF.initReader(); top = topF.getReader(); // Now read from the top. @@ -533,11 +593,11 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f // Test when badkey is > than last key in file ('||' > 'zz'). badmidkey = Bytes.toBytes("|||"); - topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true); - bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false); + topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true, sft); + bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false, sft); assertNull(topPath); - bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true); + bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true, sft); bottomF.initReader(); bottom = bottomF.getReader(); first = true; @@ -592,7 +652,7 @@ private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Except writer.close(); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); @@ -681,7 +741,7 @@ public void testDeleteFamilyBloomFilter() throws Exception { writer.close(); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); @@ -734,7 +794,7 @@ public void testReseek() throws Exception { writer.close(); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); @@ -798,7 +858,7 @@ public void testBloomTypes() throws Exception { ReaderContext context = new ReaderContextBuilder().withFilePath(f).withFileSize(fs.getFileStatus(f).getLen()) .withFileSystem(fs).withInputStreamWrapper(new FSDataInputStreamWrapper(fs, f)).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); 
storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); @@ -936,8 +996,9 @@ public void testMultipleTimestamps() throws IOException { writer.appendMetadata(0, false); writer.close(); - HStoreFile hsf = - new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + HStoreFile hsf = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); HStore store = mock(HStore.class); when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of(family)); hsf.initReader(); @@ -991,13 +1052,14 @@ public void testCacheOnWriteEvictOnClose() throws Exception { CacheConfig cacheConf = new CacheConfig(conf, bc); Path pathCowOff = new Path(baseDir, "123456789"); StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3); - HStoreFile hsf = - new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); - LOG.debug(hsf.getPath().toString()); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + HStoreFile hsfCowOff = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); + LOG.debug(hsfCowOff.getPath().toString()); // Read this file, we should see 3 misses - hsf.initReader(); - StoreFileReader reader = hsf.getReader(); + hsfCowOff.initReader(); + StoreFileReader reader = hsfCowOff.getReader(); reader.loadFileInfo(); StoreFileScanner scanner = getStoreFileScanner(reader, true, true); scanner.seek(KeyValue.LOWESTKEY); @@ -1016,11 +1078,12 @@ public void testCacheOnWriteEvictOnClose() throws Exception { cacheConf = new CacheConfig(conf, bc); Path pathCowOn = new Path(baseDir, "123456788"); writer = writeStoreFile(conf, cacheConf, pathCowOn, 3); - hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + HStoreFile hsfCowOn = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); // Read this file, we should see 3 hits - hsf.initReader(); - reader = hsf.getReader(); + hsfCowOn.initReader(); + reader = hsfCowOn.getReader(); scanner = getStoreFileScanner(reader, true, true); scanner.seek(KeyValue.LOWESTKEY); while (scanner.next() != null) { @@ -1034,15 +1097,13 @@ public void testCacheOnWriteEvictOnClose() throws Exception { reader.close(cacheConf.shouldEvictOnClose()); // Let's read back the two files to ensure the blocks exactly match - hsf = new HStoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE, true); - hsf.initReader(); - StoreFileReader readerOne = hsf.getReader(); + hsfCowOff.initReader(); + StoreFileReader readerOne = hsfCowOff.getReader(); readerOne.loadFileInfo(); StoreFileScanner scannerOne = getStoreFileScanner(readerOne, true, true); scannerOne.seek(KeyValue.LOWESTKEY); - hsf = new HStoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE, true); - hsf.initReader(); - StoreFileReader readerTwo = hsf.getReader(); + hsfCowOn.initReader(); + StoreFileReader readerTwo = hsfCowOn.getReader(); readerTwo.loadFileInfo(); StoreFileScanner scannerTwo = getStoreFileScanner(readerTwo, true, true); scannerTwo.seek(KeyValue.LOWESTKEY); @@ -1071,9 +1132,8 @@ public void testCacheOnWriteEvictOnClose() throws Exception { // Let's close the first file with evict on close turned on 
conf.setBoolean("hbase.rs.evictblocksonclose", true); cacheConf = new CacheConfig(conf, bc); - hsf = new HStoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE, true); - hsf.initReader(); - reader = hsf.getReader(); + hsfCowOff.initReader(); + reader = hsfCowOff.getReader(); reader.close(cacheConf.shouldEvictOnClose()); // We should have 3 new evictions but the evict count stat should not change. Eviction because @@ -1085,9 +1145,8 @@ public void testCacheOnWriteEvictOnClose() throws Exception { // Let's close the second file with evict on close turned off conf.setBoolean("hbase.rs.evictblocksonclose", false); cacheConf = new CacheConfig(conf, bc); - hsf = new HStoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE, true); - hsf.initReader(); - reader = hsf.getReader(); + hsfCowOn.initReader(); + reader = hsfCowOn.getReader(); reader.close(cacheConf.shouldEvictOnClose()); // We expect no changes @@ -1097,9 +1156,9 @@ public void testCacheOnWriteEvictOnClose() throws Exception { } private Path splitStoreFile(final HRegionFileSystem regionFs, final RegionInfo hri, - final String family, final HStoreFile sf, final byte[] splitKey, boolean isTopRef) - throws IOException { - Path path = regionFs.splitStoreFile(hri, family, sf, splitKey, isTopRef, null); + final String family, final HStoreFile sf, final byte[] splitKey, boolean isTopRef, + StoreFileTracker sft) throws IOException { + Path path = regionFs.splitStoreFile(hri, family, sf, splitKey, isTopRef, null, sft); if (null == path) { return null; } @@ -1166,8 +1225,9 @@ public void testDataBlockEncodingMetaData() throws IOException { .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build(); writer.close(); - HStoreFile storeFile = - new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + HStoreFile storeFile = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); storeFile.initReader(); StoreFileReader reader = storeFile.getReader(); @@ -1195,8 +1255,9 @@ public void testDataBlockSizeEncoded() throws Exception { .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build(); writeStoreFile(writer); - HStoreFile storeFile = - new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + HStoreFile storeFile = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); storeFile.initReader(); StoreFileReader reader = storeFile.getReader(); @@ -1254,8 +1315,9 @@ private void testDataBlockSizeWithCompressionRatePredicator(int expectedBlockCou writeLargeStoreFile(writer, Bytes.toBytes(name.getMethodName()), Bytes.toBytes(name.getMethodName()), 200); writer.close(); - HStoreFile storeFile = - new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + StoreFileInfo storeFileInfo = + StoreFileInfo.createStoreFileInfoForHFile(conf, fs, writer.getPath(), true); + HStoreFile storeFile = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); storeFile.initReader(); HFile.Reader fReader = HFile.createReader(fs, writer.getPath(), storeFile.getCacheConf(), true, conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java index 84437335d83e..36597910172c 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java @@ -47,6 +47,8 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -121,11 +123,16 @@ public void testCommitDaughterRegion() throws Exception { .setRegionId(region.getRegionInfo().getRegionId()).build(); HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; List splitFilesA = new ArrayList<>(); + HRegionFileSystem regionFs = region.getRegionFileSystem(); + StoreFileTracker sft = StoreFileTrackerFactory.create(region.getBaseConf(), true, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), "info")) + .withRegionFileSystem(regionFs).build()); splitFilesA.add(regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), false, region.getSplitPolicy())); + Bytes.toBytes("002"), false, region.getSplitPolicy(), sft)); List splitFilesB = new ArrayList<>(); splitFilesB.add(regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), true, region.getSplitPolicy())); + Bytes.toBytes("002"), true, region.getSplitPolicy(), sft)); MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); Path resultA = regionFS.commitDaughterRegion(daughterA, splitFilesA, env); @@ -219,7 +226,13 @@ public void testMergeLoadsFromTracker() throws Exception { private Pair copyFileInTheStoreDir(HRegion region) throws IOException { Path storeDir = region.getRegionFileSystem().getStoreDir("info"); // gets the single file - StoreFileInfo fileInfo = region.getRegionFileSystem().getStoreFiles("info").get(0); + HRegionFileSystem regionFs = region.getRegionFileSystem(); + StoreFileTracker sft = StoreFileTrackerFactory.create(region.getBaseConf(), false, + StoreContext.getBuilder().withFamilyStoreDirectoryPath(storeDir) + .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)) + .withRegionFileSystem(regionFs).build()); + List infos = sft.load(); + StoreFileInfo fileInfo = infos.get(0); // make a copy of the valid file staight into the store dir, so that it's not tracked. 
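The TestMergesSplitsAddToTracker hunks above show the recipe the patch applies wherever a test previously called HRegionFileSystem.getStoreFiles: build a StoreContext for the family directory, obtain a tracker from StoreFileTrackerFactory, and list files with load(). A condensed sketch, assuming an existing HRegionFileSystem and a family name such as "info"; the helper name is illustrative:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.regionserver.StoreContext;
    import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    final class TrackerLoadExample {
      // List the store files of one family through the SFT interface.
      static List<StoreFileInfo> listStoreFiles(Configuration conf, HRegionFileSystem regionFs,
        String family) throws IOException {
        StoreFileTracker sft = StoreFileTrackerFactory.create(conf, false,
          StoreContext.getBuilder()
            .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), family))
            .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes(family)))
            .withRegionFileSystem(regionFs).build());
        return sft.load(); // replaces regionFs.getStoreFiles(family)
      }
    }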
String copyName = UUID.randomUUID().toString().replaceAll("-", ""); Path copy = new Path(storeDir, copyName); @@ -231,7 +244,13 @@ private Pair copyFileInTheStoreDir(HRegion region) throws private void validateDaughterRegionsFiles(HRegion region, String originalFileName, String untrackedFile) throws IOException { // verify there's no link for the untracked, copied file in first region - List infos = region.getRegionFileSystem().getStoreFiles("info"); + HRegionFileSystem regionFs = region.getRegionFileSystem(); + StoreFileTracker sft = StoreFileTrackerFactory.create(regionFs.getFileSystem().getConf(), false, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), "info")) + .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)) + .withRegionFileSystem(regionFs).build()); + List infos = sft.load(); assertThat(infos, everyItem(hasProperty("activeFileName", not(containsString(untrackedFile))))); assertThat(infos, hasItem(hasProperty("activeFileName", containsString(originalFileName)))); } @@ -246,7 +265,13 @@ private void verifyFilesAreTracked(Path regionDir, FileSystem fs) throws Excepti private Path mergeFileFromRegion(HRegion regionToMerge, HRegionFileSystem mergeFS) throws IOException { HStoreFile file = (HStoreFile) regionToMerge.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; - return mergeFS.mergeStoreFile(regionToMerge.getRegionInfo(), Bytes.toString(FAMILY_NAME), file); + HRegionFileSystem regionFs = regionToMerge.getRegionFileSystem(); + StoreFileTracker sft = StoreFileTrackerFactory.create(regionToMerge.getBaseConf(), true, + StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(new Path(regionFs.getRegionDir(), FAMILY_NAME_STR)) + .withRegionFileSystem(regionFs).build()); + return mergeFS.mergeStoreFile(regionToMerge.getRegionInfo(), Bytes.toString(FAMILY_NAME), file, + sft); } private void putThreeRowsAndFlush(TableName table) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 2f4bfbe7dbe8..549371f6cc37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -60,6 +60,8 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -247,7 +249,9 @@ public void testCleanMergeReference() throws Exception { new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo); int count = 0; for (ColumnFamilyDescriptor colFamily : columnFamilies) { - count += hrfs.getStoreFiles(colFamily.getNameAsString()).size(); + StoreFileTracker sft = StoreFileTrackerFactory.create(TEST_UTIL.getConfiguration(), + tableDescriptor, colFamily, hrfs, false); + count += sft.load().size(); } ADMIN.compactRegion(mergedRegionInfo.getRegionName()); // clean up the merged region store 
files @@ -256,7 +260,9 @@ public void testCleanMergeReference() throws Exception { int newcount = 0; while (EnvironmentEdgeManager.currentTime() < timeout) { for (ColumnFamilyDescriptor colFamily : columnFamilies) { - newcount += hrfs.getStoreFiles(colFamily.getNameAsString()).size(); + StoreFileTracker sft = StoreFileTrackerFactory.create(TEST_UTIL.getConfiguration(), + tableDescriptor, colFamily, hrfs, false); + newcount += sft.load().size(); } if (newcount > count) { break; @@ -275,7 +281,9 @@ public void testCleanMergeReference() throws Exception { while (EnvironmentEdgeManager.currentTime() < timeout) { int newcount1 = 0; for (ColumnFamilyDescriptor colFamily : columnFamilies) { - newcount1 += hrfs.getStoreFiles(colFamily.getNameAsString()).size(); + StoreFileTracker sft = StoreFileTrackerFactory.create(TEST_UTIL.getConfiguration(), + tableDescriptor, colFamily, hrfs, false); + newcount1 += sft.load().size(); } if (newcount1 <= 1) { break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java index d56787aba115..df32897876c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java @@ -121,8 +121,9 @@ public void testReversibleStoreFileScanner() throws IOException { .withOutputDir(hfilePath).withFileContext(hFileContext).build(); writeStoreFile(writer); - HStoreFile sf = new HStoreFile(fs, writer.getPath(), TEST_UTIL.getConfiguration(), cacheConf, - BloomType.NONE, true); + StoreFileInfo storeFileInfo = StoreFileInfo + .createStoreFileInfoForHFile(TEST_UTIL.getConfiguration(), fs, writer.getPath(), true); + HStoreFile sf = new HStoreFile(storeFileInfo, BloomType.NONE, cacheConf); List scanners = StoreFileScanner.getScannersForStoreFiles( Collections.singletonList(sf), false, true, false, false, Long.MAX_VALUE); @@ -172,11 +173,13 @@ public void testReversibleKeyValueHeap() throws IOException { MemStore memstore = new DefaultMemStore(); writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, writer2 }); - HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf, - BloomType.NONE, true); + StoreFileInfo storeFileInfo1 = StoreFileInfo + .createStoreFileInfoForHFile(TEST_UTIL.getConfiguration(), fs, writer1.getPath(), true); + HStoreFile sf1 = new HStoreFile(storeFileInfo1, BloomType.NONE, cacheConf); - HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf, - BloomType.NONE, true); + StoreFileInfo storeFileInfo2 = StoreFileInfo + .createStoreFileInfoForHFile(TEST_UTIL.getConfiguration(), fs, writer2.getPath(), true); + HStoreFile sf2 = new HStoreFile(storeFileInfo2, BloomType.NONE, cacheConf); /** * Test without MVCC */ @@ -252,11 +255,13 @@ public void testReversibleStoreScanner() throws IOException { MemStore memstore = new DefaultMemStore(); writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, writer2 }); - HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf, - BloomType.NONE, true); + StoreFileInfo storeFileInfo1 = StoreFileInfo + .createStoreFileInfoForHFile(TEST_UTIL.getConfiguration(), fs, writer1.getPath(), true); + HStoreFile sf1 = new HStoreFile(storeFileInfo1, BloomType.NONE, cacheConf); - HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), 
TEST_UTIL.getConfiguration(), cacheConf, - BloomType.NONE, true); + StoreFileInfo storeFileInfo2 = StoreFileInfo + .createStoreFileInfoForHFile(TEST_UTIL.getConfiguration(), fs, writer2.getPath(), true); + HStoreFile sf2 = new HStoreFile(storeFileInfo2, BloomType.NONE, cacheConf); ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java index acd5362e0363..b77fae0677d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java @@ -186,7 +186,7 @@ public void testRowPrefixBloomFilter() throws Exception { // read the file ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); @@ -259,7 +259,7 @@ public void testRowPrefixBloomFilterWithGet() throws Exception { writeStoreFile(f, bt, expKeys); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); @@ -315,7 +315,7 @@ public void testRowPrefixBloomFilterWithScan() throws Exception { writeStoreFile(f, bt, expKeys); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 91bbea575309..db2a9d68f288 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -91,6 +91,8 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.testclassification.LargeTests; 
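When a TableDescriptor and ColumnFamilyDescriptor are already at hand, the TestRegionMergeTransactionOnCluster hunks above use the descriptor-based StoreFileTrackerFactory overload instead of assembling a StoreContext by hand, and the store file count then comes from load(). A small sketch under those assumptions; the counting helper itself is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

    final class TrackerCountExample {
      // Count the store files of every family through the tracker, mirroring the test above.
      static int countStoreFiles(Configuration conf, TableDescriptor tableDescriptor,
        HRegionFileSystem hrfs) throws IOException {
        int count = 0;
        for (ColumnFamilyDescriptor colFamily : tableDescriptor.getColumnFamilies()) {
          StoreFileTracker sft =
            StoreFileTrackerFactory.create(conf, tableDescriptor, colFamily, hrfs, false);
          count += sft.load().size();
        }
        return count;
      }
    }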
import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -951,11 +953,14 @@ public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() Collection storefiles = store.getStorefiles(); assertEquals(1, storefiles.size()); assertFalse(region.hasReferences()); - Path referencePath = region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "f", - storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy()); + HRegionFileSystem hfs = region.getRegionFileSystem(); + StoreFileTracker sft = StoreFileTrackerFactory.create(TESTING_UTIL.getConfiguration(), true, + store.getStoreContext()); + Path referencePath = hfs.splitStoreFile(region.getRegionInfo(), "f", + storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy(), sft); assertNull(referencePath); - referencePath = region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "i_f", - storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy()); + referencePath = hfs.splitStoreFile(region.getRegionInfo(), "i_f", + storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy(), sft); assertNotNull(referencePath); } finally { TESTING_UTIL.deleteTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java index e7e385d9ffbf..0aa47048945f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java @@ -28,10 +28,15 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; @@ -97,23 +102,6 @@ public void testEqualsWithLink() throws IOException { assertEquals(info1.hashCode(), info2.hashCode()); } - @Test - public void testOpenErrorMessageHFileLink() throws IOException, IllegalStateException { - // Test file link exception - // Try to open nonsense hfilelink. Make sure exception is from HFileLink. 
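The TestSplitTransactionOnCluster hunk above illustrates the new reference-creation flow: the caller supplies a StoreFileTracker, here derived from the store's own StoreContext, and HRegionFileSystem.splitStoreFile writes the Reference through it. A sketch of the call shape, assuming an online HRegion and HStore as in the test; the surrounding method is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.regionserver.HStoreFile;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

    final class SplitWithTrackerExample {
      // Create a split reference for one store file, passing the tracker explicitly.
      static Path splitFirstStoreFile(HRegion region, HStore store, String family, byte[] splitRow)
        throws IOException {
        HRegionFileSystem hfs = region.getRegionFileSystem();
        StoreFileTracker sft = StoreFileTrackerFactory.create(region.getBaseConf(), true,
          store.getStoreContext());
        HStoreFile sf = store.getStorefiles().iterator().next();
        // The trailing argument is the tracker; the split policy may be null in tests.
        return hfs.splitStoreFile(region.getRegionInfo(), family, sf, splitRow, false,
          region.getSplitPolicy(), sft);
      }
    }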
- Path p = new Path("/hbase/test/0123/cf/testtb=4567-abcd"); - try (FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration())) { - StoreFileInfo sfi = new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, p, true); - try { - ReaderContext context = sfi.createReaderContext(false, 1000, ReaderType.PREAD); - sfi.createReader(context, null); - throw new IllegalStateException(); - } catch (FileNotFoundException fnfe) { - assertTrue(fnfe.getMessage().contains(HFileLink.class.getSimpleName())); - } - } - } - @Test public void testOpenErrorMessageReference() throws IOException { // Test file link exception @@ -122,8 +110,17 @@ public void testOpenErrorMessageReference() throws IOException { FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); fs.mkdirs(p.getParent()); Reference r = Reference.createBottomReference(HConstants.EMPTY_START_ROW); - r.write(fs, p); - StoreFileInfo sfi = new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, p, true); + RegionInfo regionInfo = RegionInfoBuilder.newBuilder(TableName.valueOf("table1")).build(); + StoreContext storeContext = StoreContext.getBuilder() + .withRegionFileSystem(HRegionFileSystem.create(TEST_UTIL.getConfiguration(), fs, + TEST_UTIL.getDataTestDirOnTestFS(), regionInfo)) + .withColumnFamilyDescriptor( + ColumnFamilyDescriptorBuilder.newBuilder("cf1".getBytes()).build()) + .build(); + StoreFileTrackerForTest storeFileTrackerForTest = + new StoreFileTrackerForTest(TEST_UTIL.getConfiguration(), true, storeContext); + storeFileTrackerForTest.createReference(r, p); + StoreFileInfo sfi = storeFileTrackerForTest.getStoreFileInfo(p, true); try { ReaderContext context = sfi.createReaderContext(false, 1000, ReaderType.PREAD); sfi.createReader(context, null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index 79e2f797dc9f..5f36d201a753 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.storefiletracker.FailingStoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -80,41 +82,36 @@ public void setUp() throws IOException { } private TableDescriptor getTableDesc(TableName tableName, int regionReplication, - byte[]... families) { - return getTableDesc(tableName, regionReplication, false, families); + String trackerName, byte[]... families) { + return getTableDesc(tableName, regionReplication, false, trackerName, families); } private TableDescriptor getTableDesc(TableName tableName, int regionReplication, boolean readOnly, - byte[]... families) { + String trackerName, byte[]... 
families) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) .setRegionReplication(regionReplication).setReadOnly(readOnly); + if (trackerName != null) { + builder.setValue(TRACKER_IMPL, trackerName); + } Arrays.stream(families).map(family -> ColumnFamilyDescriptorBuilder.newBuilder(family) .setMaxVersions(Integer.MAX_VALUE).build()).forEachOrdered(builder::setColumnFamily); return builder.build(); } - static class FailingHRegionFileSystem extends HRegionFileSystem { - boolean fail = false; + public static class FailingHRegionFileSystem extends HRegionFileSystem { + public boolean fail = false; FailingHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir, RegionInfo regionInfo) { super(conf, fs, tableDir, regionInfo); } - @Override - public List getStoreFiles(String familyName) throws IOException { - if (fail) { - throw new IOException("simulating FS failure"); - } - return super.getStoreFiles(familyName); - } } private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId) throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); Path tableDir = CommonFSUtils.getTableDir(testDir, htd.getTableName()); - RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKey) .setEndKey(stopKey).setRegionId(0L).setReplicaId(replicaId).build(); HRegionFileSystem fs = @@ -200,7 +197,9 @@ public void testIsStale() throws IOException { when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); - TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, families); + String trackerName = FailingStoreFileTrackerForTest.class.getName(); + TableDescriptor htd = + getTableDesc(TableName.valueOf(name.getMethodName()), 2, trackerName, families); HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); regions.add(primary); @@ -252,7 +251,7 @@ public void testRefreshReadOnlyTable() throws IOException { when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); - TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, families); + TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, null, families); HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); regions.add(primary); @@ -276,11 +275,12 @@ public void testRefreshReadOnlyTable() throws IOException { verifyData(primary, 0, 200, qf, families); // then the table is set to readonly - htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, true, families); + htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, true, null, families); primary.setTableDescriptor(htd); replica1.setTableDescriptor(htd); chore.chore(); // we cannot refresh the store files verifyDataExpectFail(replica1, 100, 100, qf, families); } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java index 6a251539ccba..9dd8271d7bc0 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java @@ -84,7 +84,7 @@ public void testReseek() throws Exception { writer.close(); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); - StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, f, true); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(conf, fs, f, true); storeFileInfo.initHFileInfo(context); StoreFileReader reader = storeFileInfo.createReader(context, cacheConf); storeFileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java index 2f2bc0033d7b..c0b7621e9eb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java @@ -128,7 +128,8 @@ public void testScannerCloseAndUpdateReadersWithMemstoreScanner() throws Excepti region.put(p); HStore store = region.getStore(fam); // use the lock to manually get a new memstore scanner. this is what - // HStore#notifyChangedReadersObservers does under the lock.(lock is not needed here + // HStore#notifyChangedReadersObservers does under the lock.(lock is not needed + // here // since it is just a testcase). store.getStoreEngine().readLock(); final List memScanners = store.memstore.getScanners(Long.MAX_VALUE); @@ -213,9 +214,9 @@ private static KeyValue.Type generateKeyType(Random rand) { } } - private HStoreFile readStoreFile(Path storeFilePath, Configuration conf) throws Exception { + private HStoreFile readStoreFile(StoreFileInfo fileinfo) throws Exception { // Open the file reader with block cache disabled. 
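Per the TestStoreFileInfo change further above, a Reference file is no longer written with Reference.write(fs, path); the tracker's createReference is used and the resulting StoreFileInfo is fetched back through getStoreFileInfo. A hedged sketch of that sequence, assuming createReference and getStoreFileInfo are exposed on the SFT interface as the StoreFileTracker.java hunks in this patch indicate:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;

    final class ReferenceThroughTrackerExample {
      // Write a bottom reference through the tracker instead of Reference.write(fs, path),
      // then read it back as a StoreFileInfo (assumed interface methods, per this patch).
      static StoreFileInfo writeBottomReference(StoreFileTracker sft, Path referencePath)
        throws IOException {
        Reference r = Reference.createBottomReference(HConstants.EMPTY_START_ROW);
        sft.createReference(r, referencePath);
        return sft.getStoreFileInfo(referencePath, true);
      }
    }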
- HStoreFile file = new HStoreFile(fs, storeFilePath, conf, cacheConf, BloomType.NONE, true); + HStoreFile file = new HStoreFile(fileinfo, BloomType.NONE, cacheConf); return file; } @@ -226,7 +227,8 @@ private void testScannerCloseAndUpdateReaderInternal(boolean awaitUpdate, boolea HStoreFile file = null; List files = new ArrayList(); try { - file = readStoreFile(path, CONF); + StoreFileInfo storeFileInfo = StoreFileInfo.createStoreFileInfoForHFile(CONF, fs, path, true); + file = readStoreFile(storeFileInfo); files.add(file); } catch (Exception e) { // fail test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java index 2037b738e433..a479550d7e69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java @@ -574,7 +574,10 @@ private static MockHStoreFile createFile(long size, long seqNum, byte[] startKey FileSystem fs = TEST_UTIL.getTestFileSystem(); Path testFilePath = StoreFileWriter.getUniqueFile(fs, CFDIR); fs.create(testFilePath).close(); - MockHStoreFile sf = new MockHStoreFile(TEST_UTIL, testFilePath, size, 0, false, seqNum); + StoreFileInfo storeFileInfo = StoreFileInfo + .createStoreFileInfoForHFile(TEST_UTIL.getConfiguration(), fs, testFilePath, true); + MockHStoreFile sf = + new MockHStoreFile(TEST_UTIL, testFilePath, size, 0, false, seqNum, storeFileInfo); if (startKey != null) { sf.setMetadataValue(StripeStoreFileManager.STRIPE_START_KEY, startKey); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FailingStoreFileTrackerForTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FailingStoreFileTrackerForTest.java new file mode 100644 index 000000000000..34a279db5b61 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FailingStoreFileTrackerForTest.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
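The TestStoreFileRefresherChore rework above no longer simulates filesystem failures inside HRegionFileSystem; the table is instead pointed at a failing tracker through the TRACKER_IMPL table property, the tracker being the new FailingStoreFileTrackerForTest defined in the hunk that follows. A sketch of wiring a custom tracker in via the descriptor, with an illustrative "cf" family and helper name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    final class TrackerImplConfigExample {
      // Point a table at a test-only tracker class; trackerClassName is whatever
      // implementation the test needs (e.g. a failing tracker for fault injection).
      static TableDescriptor withTracker(TableName tableName, String trackerClassName) {
        return TableDescriptorBuilder.newBuilder(tableName)
          .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerClassName)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
          .build();
      }
    }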
+ */ +package org.apache.hadoop.hbase.regionserver.storefiletracker; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.regionserver.StoreContext; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.TestStoreFileRefresherChore.FailingHRegionFileSystem; + +public class FailingStoreFileTrackerForTest extends DefaultStoreFileTracker { + + FailingStoreFileTrackerForTest(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) { + super(conf, isPrimaryReplica, ctx); + } + + @Override + protected List doLoadStoreFiles(boolean readOnly) throws IOException { + if (ctx.getRegionFileSystem() instanceof FailingHRegionFileSystem) { + if (((FailingHRegionFileSystem) ctx.getRegionFileSystem()).fail) { + throw new IOException("simulating FS failure"); + } + } + return super.doLoadStoreFiles(readOnly); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java index a6ab40b59d82..c2fe9afc7020 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java @@ -27,6 +27,7 @@ import java.util.concurrent.LinkedBlockingQueue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.slf4j.Logger; @@ -69,4 +70,10 @@ public static boolean tracked(String encodedRegionName, String family, Path file public static void clear() { trackedFiles.clear(); } + + @Override + public Reference readReference(Path p) throws IOException { + return super.readReference(p); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java index 02b122f704a7..af1f8ad561d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java @@ -31,11 +31,15 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -111,7 +115,10 @@ public void testIsStoreFileSizeMatchFilesystemAndManifest() throws IOException { for (RegionInfo regionInfo : regionsInfo) { 
HRegionFileSystem hRegionFileSystem = HRegionFileSystem.openRegionFromFileSystem(conf, fs, path, regionInfo, true); - Collection storeFilesFS = hRegionFileSystem.getStoreFiles(FAMILY_NAME); + ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(FAMILY_NAME); + StoreFileTracker sft = + StoreFileTrackerFactory.create(conf, table.getDescriptor(), hcd, hRegionFileSystem); + Collection storeFilesFS = sft.load(); Iterator sfIterator = storeFilesFS.iterator(); while (sfIterator.hasNext()) { StoreFileInfo sfi = sfIterator.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java index 981e312043ea..962f825ffece 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; @@ -95,8 +96,8 @@ public void testStoresNeedingCompaction() throws Exception { public void testIfWeHaveNewReferenceFilesButOldStoreFiles() throws Exception { // this tests that reference files that are new, but have older timestamps for the files // they reference still will get compacted. - TableName table = TableName.valueOf("TestMajorCompactor"); - TableDescriptor htd = UTILITY.createTableDescriptor(table, Bytes.toBytes(FAMILY)); + TableName tableName = TableName.valueOf("TestMajorCompactor"); + TableDescriptor htd = UTILITY.createTableDescriptor(tableName, Bytes.toBytes(FAMILY)); RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, UTILITY.getConfiguration(), htd); @@ -111,12 +112,23 @@ public void testIfWeHaveNewReferenceFilesButOldStoreFiles() throws Exception { spy(new MajorCompactionRequest(connection, region.getRegionInfo(), Sets.newHashSet(FAMILY))); doReturn(paths).when(majorCompactionRequest).getReferenceFilePaths(any(FileSystem.class), any(Path.class)); + StoreFileTrackerForTest sft = mockSFT(true, storeFiles); doReturn(fileSystem).when(majorCompactionRequest).getFileSystem(); + doReturn(sft).when(majorCompactionRequest).getStoreFileTracker(any(), any()); + doReturn(UTILITY.getConfiguration()).when(connection).getConfiguration(); Set result = majorCompactionRequest.getStoresRequiringCompaction(Sets.newHashSet("a"), 100); assertEquals(FAMILY, Iterables.getOnlyElement(result)); } + protected StoreFileTrackerForTest mockSFT(boolean references, List storeFiles) + throws IOException { + StoreFileTrackerForTest sft = mock(StoreFileTrackerForTest.class); + doReturn(references).when(sft).hasReferences(); + doReturn(storeFiles).when(sft).load(); + return sft; + } + protected HRegionFileSystem mockFileSystem(RegionInfo info, boolean hasReferenceFiles, List storeFiles) throws IOException { long timestamp = storeFiles.stream().findFirst().get().getModificationTime(); @@ -135,7 +147,6 @@ private HRegionFileSystem mockFileSystem(RegionInfo info, boolean hasReferenceFi 
doReturn(info).when(mockSystem).getRegionInfo(); doReturn(regionStoreDir).when(mockSystem).getStoreDir(FAMILY); doReturn(hasReferenceFiles).when(mockSystem).hasReferences(anyString()); - doReturn(storeFiles).when(mockSystem).getStoreFiles(anyString()); doReturn(fileSystem).when(mockSystem).getFileSystem(); return mockSystem; } @@ -165,6 +176,8 @@ private MajorCompactionRequest makeMockRequest(List storeFiles, b new MajorCompactionRequest(connection, regionInfo, Sets.newHashSet("a")); MajorCompactionRequest spy = spy(request); HRegionFileSystem fileSystem = mockFileSystem(regionInfo, references, storeFiles); + StoreFileTrackerForTest sft = mockSFT(references, storeFiles); + doReturn(sft).when(spy).getStoreFileTracker(any(), any()); doReturn(fileSystem).when(spy).getFileSystem(); return spy; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java index f941282039f0..b3fab4c0b5c1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Before; import org.junit.ClassRule; @@ -89,6 +91,8 @@ private MajorCompactionTTLRequest makeMockRequest(List storeFiles MajorCompactionTTLRequest request = new MajorCompactionTTLRequest(connection, regionInfo); MajorCompactionTTLRequest spy = spy(request); HRegionFileSystem fileSystem = mockFileSystem(regionInfo, false, storeFiles); + StoreFileTrackerForTest sft = mockSFT(false, storeFiles); + doReturn(sft).when(spy).getStoreFileTracker(any(), any()); doReturn(fileSystem).when(spy).getFileSystem(); return spy; }
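Finally, the TestMajorCompactionRequest and TestMajorCompactionTTLRequest hunks above stop stubbing HRegionFileSystem.getStoreFiles and instead hand the spied request a mocked tracker whose hasReferences() and load() answers are canned. A minimal Mockito sketch of that mockSFT pattern; class and variable names are illustrative:

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest;

    final class MockTrackerExample {
      // Mirror the mockSFT helper above: a tracker mock with canned answers for
      // hasReferences() and load(), ready to be returned from a spied getStoreFileTracker().
      static StoreFileTrackerForTest mockTracker(boolean references, List<StoreFileInfo> storeFiles)
        throws IOException {
        StoreFileTrackerForTest sft = mock(StoreFileTrackerForTest.class);
        doReturn(references).when(sft).hasReferences();
        doReturn(storeFiles).when(sft).load();
        return sft;
      }
    }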