Commit a5ff289

authored
HBASE-27484 FNFE on StoreFileScanner after a flush followed by a compaction (#4882)
Signed-off-by: Peter Somogyi <[email protected]>
1 parent 1ddb5bb commit a5ff289

File tree

  • hbase-server/src

2 files changed: +70 -1 lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java

Lines changed: 3 additions & 1 deletion
@@ -961,10 +961,10 @@ public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread,
       storeFilesToScan = this.storeEngine.getStoreFileManager().getFilesForScan(startRow,
         includeStartRow, stopRow, includeStopRow);
       memStoreScanners = this.memstore.getScanners(readPt);
+      storeFilesToScan.stream().forEach(f -> f.getFileInfo().refCount.incrementAndGet());
     } finally {
       this.storeEngine.readUnlock();
     }
-
     try {
       // First the store file scanners

@@ -981,6 +981,8 @@ public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread,
     } catch (Throwable t) {
       clearAndClose(memStoreScanners);
       throw t instanceof IOException ? (IOException) t : new IOException(t);
+    } finally {
+      storeFilesToScan.stream().forEach(f -> f.getFileInfo().refCount.decrementAndGet());
     }
   }

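The change pins every store file selected for the scan by incrementing its reference count while the store read lock is still held, and releases the pin in a finally block once the store file scanners have been created or creation has failed. A pinned file should not be archived by the concurrent compaction cleanup, which closes the window in which the FileNotFoundException was raised. Below is a minimal, self-contained sketch of that guard pattern; TrackedFile, openScanners, and mayArchive are hypothetical stand-ins, not the HBase API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the ref-count guard used by the fix; all names here are hypothetical.
class TrackedFile {
  final String path;
  final AtomicInteger refCount = new AtomicInteger(); // > 0 means "in use, do not archive"

  TrackedFile(String path) {
    this.path = path;
  }
}

class RefCountGuardSketch {
  // Pin the selected files (done under the store read lock in the real code),
  // build the scanners, and always unpin in finally, on success or failure.
  static List<String> openScanners(List<TrackedFile> selected) {
    selected.forEach(f -> f.refCount.incrementAndGet());
    try {
      List<String> scanners = new ArrayList<>();
      for (TrackedFile f : selected) {
        scanners.add("scanner:" + f.path); // the real code opens a StoreFileScanner here
      }
      return scanners;
    } finally {
      selected.forEach(f -> f.refCount.decrementAndGet());
    }
  }

  // A concurrent cleaner (the compaction's archive step) must skip pinned files.
  static boolean mayArchive(TrackedFile f) {
    return f.refCount.get() == 0;
  }
}

The sketch only shows the increment/decrement bracketing; the actual patch reuses the existing refCount field on the store file info, presumably relying on the archive step already skipping files whose readers are still referenced.
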
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java

Lines changed: 67 additions & 0 deletions
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;

+import static org.apache.hadoop.hbase.regionserver.DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;

@@ -41,12 +42,14 @@
 import java.util.List;
 import java.util.ListIterator;
 import java.util.NavigableSet;
+import java.util.Optional;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;

@@ -103,7 +106,9 @@
 import org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType;
 import org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
+import org.apache.hadoop.hbase.regionserver.compactions.EverythingPolicy;
 import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;

@@ -1113,6 +1118,68 @@ public void testRefreshStoreFilesNotChanged() throws IOException {
     verify(spiedStoreEngine, times(1)).replaceStoreFiles(any(), any(), any(), any());
   }

+  @Test
+  public void testScanWithCompactionAfterFlush() throws Exception {
+    TEST_UTIL.getConfiguration().set(DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+      EverythingPolicy.class.getName());
+    init(name.getMethodName());
+
+    assertEquals(0, this.store.getStorefilesCount());
+
+    KeyValue kv = new KeyValue(row, family, qf1, 1, (byte[]) null);
+    // add some data, flush
+    this.store.add(kv, null);
+    flush(1);
+    kv = new KeyValue(row, family, qf2, 1, (byte[]) null);
+    // add some data, flush
+    this.store.add(kv, null);
+    flush(2);
+    kv = new KeyValue(row, family, qf3, 1, (byte[]) null);
+    // add some data, flush
+    this.store.add(kv, null);
+    flush(3);
+
+    ExecutorService service = Executors.newFixedThreadPool(2);
+
+    Scan scan = new Scan(new Get(row));
+    Future<KeyValueScanner> scanFuture = service.submit(() -> {
+      try {
+        LOG.info(">>>> creating scanner");
+        return this.store.createScanner(scan,
+          new ScanInfo(HBaseConfiguration.create(),
+            ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(4).build(),
+            Long.MAX_VALUE, 0, CellComparator.getInstance()),
+          scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), 0);
+      } catch (IOException e) {
+        e.printStackTrace();
+        return null;
+      }
+    });
+    Future compactFuture = service.submit(() -> {
+      try {
+        LOG.info(">>>>>> starting compaction");
+        Optional<CompactionContext> opCompaction = this.store.requestCompaction();
+        assertTrue(opCompaction.isPresent());
+        store.compact(opCompaction.get(), new NoLimitThroughputController(), User.getCurrent());
+        LOG.info(">>>>>> Compaction is finished");
+        this.store.closeAndArchiveCompactedFiles();
+        LOG.info(">>>>>> Compacted files deleted");
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
+    });
+
+    KeyValueScanner kvs = scanFuture.get();
+    compactFuture.get();
+    ((StoreScanner) kvs).currentScanners.forEach(s -> {
+      if (s instanceof StoreFileScanner) {
+        assertEquals(1, ((StoreFileScanner) s).getReader().getRefCount());
+      }
+    });
+    kvs.seek(kv);
+    service.shutdownNow();
+  }
+
   private long countMemStoreScanner(StoreScanner scanner) {
     if (scanner.currentScanners == null) {
       return 0;

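The new test reproduces the race by running scanner creation and a full compact-then-archive cycle on two threads and only asserting after both futures have completed: each surviving StoreFileScanner must still hold exactly one reference on its reader, and the scanner must still be able to seek. A generic sketch of that orchestration, with placeholder task bodies in place of the HBase calls, looks like this:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Generic two-thread race harness in the shape of the new test; the task bodies are placeholders.
public class RaceHarnessSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(2);
    try {
      Future<String> scanFuture = service.submit(() -> {
        // stand-in for store.createScanner(...)
        return "scanner";
      });
      Future<?> compactFuture = service.submit(() -> {
        // stand-in for requestCompaction() + compact() + closeAndArchiveCompactedFiles()
      });
      String scanner = scanFuture.get(); // join both racing tasks before asserting anything
      compactFuture.get();
      if (scanner == null) {
        throw new AssertionError("scanner creation failed");
      }
      // the real test then checks getReader().getRefCount() == 1 and that seek() still succeeds
    } finally {
      service.shutdownNow();
    }
  }
}

Joining both futures before asserting keeps the checks deterministic even though the interleaving of the two tasks is not.
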