Skip to content

Commit 03596b9

Browse files
author
Prathyusha Garre
committed
HBASE-27826 Refactor code to move creation of Ref files to SFT interface apis
1 parent 4c29c5d commit 03596b9

33 files changed

+627
-238
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -456,7 +456,7 @@ public static String createFromHFileLink(final Configuration conf, final FileSys
456456
* Create the back reference name
457457
*/
458458
// package-private for testing
459-
static String createBackReferenceName(final String tableNameStr, final String regionName) {
459+
public static String createBackReferenceName(final String tableNameStr, final String regionName) {
460460

461461
return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '=');
462462
}

hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -195,7 +195,7 @@ public static Reference convert(final FSProtos.Reference r) {
195195
* delimiter, pb reads to EOF which may not be what you want).
196196
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
197197
*/
198-
byte[] toByteArray() throws IOException {
198+
public byte[] toByteArray() throws IOException {
199199
return ProtobufUtil.prependPBMagic(convert().toByteArray());
200200
}
201201

hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -612,7 +612,7 @@ private List<Path> mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem reg
612612
// to read the hfiles.
613613
storeFileInfo.setConf(storeConfiguration);
614614
Path refFile = mergeRegionFs.mergeStoreFile(regionFs.getRegionInfo(), family,
615-
new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED));
615+
new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED), tracker);
616616
mergedFiles.add(refFile);
617617
}
618618
}

hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java

Lines changed: 21 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -666,8 +666,9 @@ private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv en
666666
// table dir. In case of failure, the proc would go through this again, already existing
667667
// region dirs and split files would just be ignored, new split files should get created.
668668
int nbFiles = 0;
669-
final Map<String, Collection<StoreFileInfo>> files =
670-
new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
669+
final Map<String, Pair<Collection<StoreFileInfo>, StoreFileTracker>> files =
670+
new HashMap<String, Pair<Collection<StoreFileInfo>, StoreFileTracker>>(
671+
htd.getColumnFamilyCount());
671672
for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
672673
String family = cfd.getNameAsString();
673674
StoreFileTracker tracker =
@@ -690,7 +691,7 @@ private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv en
690691
}
691692
if (filteredSfis == null) {
692693
filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
693-
files.put(family, filteredSfis);
694+
files.put(family, new Pair(filteredSfis, tracker));
694695
}
695696
filteredSfis.add(sfi);
696697
nbFiles++;
@@ -713,10 +714,11 @@ private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv en
713714
final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
714715

715716
// Split each store file.
716-
for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
717+
for (Map.Entry<String, Pair<Collection<StoreFileInfo>, StoreFileTracker>> e : files
718+
.entrySet()) {
717719
byte[] familyName = Bytes.toBytes(e.getKey());
718720
final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
719-
final Collection<StoreFileInfo> storeFiles = e.getValue();
721+
final Collection<StoreFileInfo> storeFiles = e.getValue().getFirst();
720722
if (storeFiles != null && storeFiles.size() > 0) {
721723
final Configuration storeConfiguration =
722724
StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd);
@@ -727,8 +729,9 @@ private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv en
727729
// is running in a regionserver's Store context, or we might not be able
728730
// to read the hfiles.
729731
storeFileInfo.setConf(storeConfiguration);
730-
StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName,
731-
new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED));
732+
StoreFileSplitter sfs =
733+
new StoreFileSplitter(regionFs, e.getValue().getSecond(), familyName,
734+
new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED));
732735
futures.add(threadPool.submit(sfs));
733736
}
734737
}
@@ -794,19 +797,19 @@ private void assertSplitResultFilesCount(final FileSystem fs,
794797
}
795798
}
796799

797-
private Pair<Path, Path> splitStoreFile(HRegionFileSystem regionFs, byte[] family, HStoreFile sf)
798-
throws IOException {
800+
private Pair<Path, Path> splitStoreFile(HRegionFileSystem regionFs, StoreFileTracker tracker,
801+
byte[] family, HStoreFile sf) throws IOException {
799802
if (LOG.isDebugEnabled()) {
800803
LOG.debug("pid=" + getProcId() + " splitting started for store file: " + sf.getPath()
801804
+ " for region: " + getParentRegion().getShortNameToLog());
802805
}
803806

804807
final byte[] splitRow = getSplitRow();
805808
final String familyName = Bytes.toString(family);
806-
final Path path_first =
807-
regionFs.splitStoreFile(this.daughterOneRI, familyName, sf, splitRow, false, splitPolicy);
808-
final Path path_second =
809-
regionFs.splitStoreFile(this.daughterTwoRI, familyName, sf, splitRow, true, splitPolicy);
809+
final Path path_first = regionFs.splitStoreFile(this.daughterOneRI, familyName, sf, splitRow,
810+
false, splitPolicy, tracker);
811+
final Path path_second = regionFs.splitStoreFile(this.daughterTwoRI, familyName, sf, splitRow,
812+
true, splitPolicy, tracker);
810813
if (LOG.isDebugEnabled()) {
811814
LOG.debug("pid=" + getProcId() + " splitting complete for store file: " + sf.getPath()
812815
+ " for region: " + getParentRegion().getShortNameToLog());
@@ -822,22 +825,25 @@ private class StoreFileSplitter implements Callable<Pair<Path, Path>> {
822825
private final HRegionFileSystem regionFs;
823826
private final byte[] family;
824827
private final HStoreFile sf;
828+
private final StoreFileTracker tracker;
825829

826830
/**
827831
* Constructor that takes what it needs to split
828832
* @param regionFs the file system
829833
* @param family Family that contains the store file
830834
* @param sf which file
831835
*/
832-
public StoreFileSplitter(HRegionFileSystem regionFs, byte[] family, HStoreFile sf) {
836+
public StoreFileSplitter(HRegionFileSystem regionFs, StoreFileTracker tracker, byte[] family,
837+
HStoreFile sf) {
833838
this.regionFs = regionFs;
834839
this.sf = sf;
835840
this.family = family;
841+
this.tracker = tracker;
836842
}
837843

838844
@Override
839845
public Pair<Path, Path> call() throws IOException {
840-
return splitStoreFile(regionFs, family, sf);
846+
return splitStoreFile(regionFs, tracker, family, sf);
841847
}
842848
}
843849

hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
import org.apache.hadoop.hbase.MetaTableAccessor;
3636
import org.apache.hadoop.hbase.ScheduledChore;
3737
import org.apache.hadoop.hbase.TableName;
38+
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
3839
import org.apache.hadoop.hbase.client.Connection;
3940
import org.apache.hadoop.hbase.client.ConnectionFactory;
4041
import org.apache.hadoop.hbase.client.Get;
@@ -50,6 +51,8 @@
5051
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
5152
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
5253
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
54+
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
55+
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
5356
import org.apache.hadoop.hbase.util.Bytes;
5457
import org.apache.hadoop.hbase.util.CommonFSUtils;
5558
import org.apache.hadoop.hbase.util.Pair;
@@ -422,7 +425,7 @@ private static Pair<Boolean, Boolean> checkRegionReferences(MasterServices servi
422425
try {
423426
HRegionFileSystem regionFs = HRegionFileSystem
424427
.openRegionFromFileSystem(services.getConfiguration(), fs, tabledir, region, true);
425-
boolean references = regionFs.hasReferences(tableDescriptor);
428+
boolean references = hasReferences(services.getConfiguration(), regionFs, tableDescriptor);
426429
return new Pair<>(Boolean.TRUE, references);
427430
} catch (IOException e) {
428431
LOG.error("Error trying to determine if region {} has references, assuming it does",
@@ -431,6 +434,17 @@ private static Pair<Boolean, Boolean> checkRegionReferences(MasterServices servi
431434
}
432435
}
433436

437+
private static boolean hasReferences(Configuration conf, HRegionFileSystem regionFs,
438+
TableDescriptor htd) throws IOException {
439+
for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
440+
StoreFileTracker sft = StoreFileTrackerFactory.create(conf, htd, family, regionFs, false);
441+
if (sft.hasReferences(family.getNameAsString())) {
442+
return true;
443+
}
444+
}
445+
return false;
446+
}
447+
434448
private void updateAssignmentManagerMetrics() {
435449
services.getAssignmentManager().getAssignmentManagerMetrics()
436450
.updateHoles(lastReport.getHoles().size());

hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
2626
import org.apache.hadoop.hbase.regionserver.BloomType;
2727
import org.apache.hadoop.hbase.regionserver.HStoreFile;
28+
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
2829
import org.apache.yetus.audience.InterfaceAudience;
2930

3031
/**
@@ -41,10 +42,12 @@ public CachedMobFile(HStoreFile sf) {
4142
}
4243

4344
public static CachedMobFile create(FileSystem fs, Path path, Configuration conf,
44-
CacheConfig cacheConf) throws IOException {
45-
// XXX: primaryReplica is only used for constructing the key of block cache so it is not a
46-
// critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
47-
HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
45+
CacheConfig cacheConf, StoreFileTracker sft) throws IOException {
46+
// XXX: primaryReplica is only used for constructing the key of block cache so
47+
// it is not a
48+
// critical problem if we pass the wrong value, so here we always pass true.
49+
// Need to fix later.
50+
HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true, sft);
4851
return new CachedMobFile(sf);
4952
}
5053

hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -56,17 +56,17 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
5656
* @param htd The descriptor of the current table.
5757
* @param family The current family.
5858
*/
59-
public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family)
59+
public void cleanExpiredMobFiles(TableDescriptor htd, ColumnFamilyDescriptor family)
6060
throws IOException {
6161
Configuration conf = getConf();
62-
TableName tn = TableName.valueOf(tableName);
62+
String tableName = htd.getTableName().getNameAsString();
6363
FileSystem fs = FileSystem.get(conf);
6464
LOG.info("Cleaning the expired MOB files of " + family.getNameAsString() + " in " + tableName);
6565
// disable the block cache.
6666
Configuration copyOfConf = new Configuration(conf);
6767
copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
6868
CacheConfig cacheConfig = new CacheConfig(copyOfConf);
69-
MobUtils.cleanExpiredMobFiles(fs, conf, tn, family, cacheConfig,
69+
MobUtils.cleanExpiredMobFiles(fs, conf, htd, family, cacheConfig,
7070
EnvironmentEdgeManager.currentTime());
7171
}
7272

@@ -105,7 +105,7 @@ public int run(String[] args) throws Exception {
105105
throw new IOException(
106106
"The minVersions of the column family is not 0, could not be handled by this cleaner");
107107
}
108-
cleanExpiredMobFiles(tableName, family);
108+
cleanExpiredMobFiles(htd, family);
109109
return 0;
110110
} finally {
111111
admin.close();

hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
import org.apache.hadoop.hbase.regionserver.BloomType;
3030
import org.apache.hadoop.hbase.regionserver.HStoreFile;
3131
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
32+
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
3233
import org.apache.yetus.audience.InterfaceAudience;
3334

3435
/**
@@ -133,11 +134,13 @@ public void close() throws IOException {
133134
* @param cacheConf The CacheConfig.
134135
* @return An instance of the MobFile.
135136
*/
136-
public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf)
137-
throws IOException {
138-
// XXX: primaryReplica is only used for constructing the key of block cache so it is not a
139-
// critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
140-
HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
137+
public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf,
138+
StoreFileTracker sft) throws IOException {
139+
// XXX: primaryReplica is only used for constructing the key of block cache so
140+
// it is not a
141+
// critical problem if we pass the wrong value, so here we always pass true.
142+
// Need to fix later.
143+
HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true, sft);
141144
return new MobFile(sf);
142145
}
143146
}

hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,9 @@
3333
import org.apache.hadoop.fs.FileSystem;
3434
import org.apache.hadoop.fs.Path;
3535
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
36+
import org.apache.hadoop.hbase.regionserver.StoreContext;
37+
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
38+
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
3639
import org.apache.hadoop.hbase.util.IdLock;
3740
import org.apache.yetus.audience.InterfaceAudience;
3841
import org.slf4j.Logger;
@@ -198,9 +201,11 @@ public void evictFile(String fileName) {
198201
* @param cacheConf The current MobCacheConfig
199202
* @return An opened mob file.
200203
*/
201-
public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
204+
public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf,
205+
StoreContext storeContext) throws IOException {
206+
StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, storeContext);
202207
if (!isCacheEnabled) {
203-
MobFile mobFile = MobFile.create(fs, path, conf, cacheConf);
208+
MobFile mobFile = MobFile.create(fs, path, conf, cacheConf, sft);
204209
mobFile.open();
205210
return mobFile;
206211
} else {
@@ -214,7 +219,7 @@ public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws
214219
if (map.size() > mobFileMaxCacheSize) {
215220
evict();
216221
}
217-
cached = CachedMobFile.create(fs, path, conf, cacheConf);
222+
cached = CachedMobFile.create(fs, path, conf, cacheConf, sft);
218223
cached.open();
219224
map.put(fileName, cached);
220225
miss.increment();

hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ protected void chore() {
8787
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
8888
if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) {
8989
try {
90-
cleaner.cleanExpiredMobFiles(htd.getTableName().getNameAsString(), hcd);
90+
cleaner.cleanExpiredMobFiles(htd, hcd);
9191
} catch (IOException e) {
9292
LOG.error("Failed to clean the expired mob files table={} family={}",
9393
htd.getTableName().getNameAsString(), hcd.getNameAsString(), e);

0 commit comments

Comments
 (0)