|
30 | 30 | import org.apache.lucene.index.IndexFormatTooOldException; |
31 | 31 | import org.apache.lucene.index.IndexNotFoundException; |
32 | 32 | import org.apache.lucene.index.IndexWriter; |
| 33 | +import org.apache.lucene.index.IndexWriterConfig; |
| 34 | +import org.apache.lucene.index.NoMergePolicy; |
33 | 35 | import org.apache.lucene.index.SegmentCommitInfo; |
34 | 36 | import org.apache.lucene.index.SegmentInfos; |
35 | 37 | import org.apache.lucene.store.AlreadyClosedException; |
|
46 | 48 | import org.apache.lucene.util.ArrayUtil; |
47 | 49 | import org.apache.lucene.util.BytesRef; |
48 | 50 | import org.apache.lucene.util.BytesRefBuilder; |
49 | | -import org.elasticsearch.core.internal.io.IOUtils; |
50 | 51 | import org.apache.lucene.util.Version; |
51 | 52 | import org.elasticsearch.ElasticsearchException; |
52 | 53 | import org.elasticsearch.ExceptionsHelper; |
|
69 | 70 | import org.elasticsearch.common.util.concurrent.AbstractRefCounted; |
70 | 71 | import org.elasticsearch.common.util.concurrent.RefCounted; |
71 | 72 | import org.elasticsearch.common.util.iterable.Iterables; |
| 73 | +import org.elasticsearch.core.internal.io.IOUtils; |
72 | 74 | import org.elasticsearch.env.NodeEnvironment; |
73 | 75 | import org.elasticsearch.env.ShardLock; |
74 | 76 | import org.elasticsearch.env.ShardLockObtainFailedException; |
75 | 77 | import org.elasticsearch.index.IndexSettings; |
76 | 78 | import org.elasticsearch.index.engine.Engine; |
| 79 | +import org.elasticsearch.index.engine.InternalEngine; |
77 | 80 | import org.elasticsearch.index.seqno.SequenceNumbers; |
78 | 81 | import org.elasticsearch.index.shard.AbstractIndexShardComponent; |
79 | 82 | import org.elasticsearch.index.shard.IndexShard; |
@@ -155,7 +158,8 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire |
155 | 158 | this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY); |
156 | 159 | } |
157 | 160 |
|
158 | | - public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException { |
| 161 | + public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, |
| 162 | + OnClose onClose) throws IOException { |
159 | 163 | super(shardId, indexSettings); |
160 | 164 | final Settings settings = indexSettings.getSettings(); |
161 | 165 | this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId)); |
@@ -1454,4 +1458,100 @@ private static long estimateSize(Directory directory) throws IOException { |
1454 | 1458 | } |
1455 | 1459 | } |
1456 | 1460 |
|
| 1461 | + /** |
| 1462 | + * Creates an empty Lucene index and a corresponding empty translog. Any existing data will be deleted. |
| 1463 | + */ |
| 1464 | + public void createEmpty() throws IOException { |
| 1465 | + metadataLock.writeLock().lock(); |
| 1466 | + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory)) { |
| 1467 | + final Map<String, String> map = new HashMap<>(); |
| 1468 | + map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); |
| 1469 | + map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); |
| 1470 | + map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); |
| 1471 | + map.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1"); |
| 1472 | + updateCommitData(writer, map); |
| 1473 | + } finally { |
| 1474 | + metadataLock.writeLock().unlock(); |
| 1475 | + } |
| 1476 | + } |
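
The `createEmpty` path above is easiest to understand outside of `Store`. Below is a minimal, self-contained Lucene sketch of the same mechanics: `OpenMode.CREATE` discards any existing segments, and the markers are baked into the commit point's user data. The literal key strings and the temp directory are illustrative stand-ins for the `Engine`/`SequenceNumbers` constants, not taken from this change:

```java
import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CreateEmptyDemo {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Files.createTempDirectory("empty-index"))) {
            IndexWriterConfig iwc = new IndexWriterConfig(null)   // no analyzer: nothing is indexed
                .setCommitOnClose(false)                          // commits stay explicit
                .setMergePolicy(NoMergePolicy.INSTANCE)           // never merge here
                .setOpenMode(IndexWriterConfig.OpenMode.CREATE);  // wipes any existing index
            try (IndexWriter writer = new IndexWriter(dir, iwc)) {
                Map<String, String> commitData = new HashMap<>();
                commitData.put("history_uuid", "fresh-uuid");     // stand-in for Engine.HISTORY_UUID_KEY
                commitData.put("local_checkpoint", "-1");         // stand-in for NO_OPS_PERFORMED
                commitData.put("max_seq_no", "-1");
                writer.setLiveCommitData(commitData.entrySet());
                writer.commit();                                  // bakes the map into the commit point
            }
            // the markers survive in the latest commit and are readable without an IndexWriter
            System.out.println(SegmentInfos.readLatestCommit(dir).getUserData());
        }
    }
}
```
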
| 1477 | + |
| 1478 | + |
| 1479 | + /** |
| 1480 | + * Marks an existing Lucene index with a new history uuid. |
| 1481 | + * This is used to make sure that no existing shard will recover from this index using ops-based recovery. |
| 1482 | + */ |
| 1483 | + public void bootstrapNewHistory() throws IOException { |
| 1484 | + metadataLock.writeLock().lock(); |
| 1485 | + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { |
| 1486 | + final Map<String, String> userData = getUserData(writer); |
| 1487 | + final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)); |
| 1488 | + final Map<String, String> map = new HashMap<>(); |
| 1489 | + map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); |
| 1490 | + map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); |
| 1491 | + updateCommitData(writer, map); |
| 1492 | + } finally { |
| 1493 | + metadataLock.writeLock().unlock(); |
| 1494 | + } |
| 1495 | + } |
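
The net effect of `bootstrapNewHistory` is easiest to see by reading the commit back. A hedged inspection fragment, assuming the imports from the sketch above and a `Directory dir` in scope (key strings again stand in for the constants):

```java
// After bootstrapNewHistory() the history uuid is fresh and the local checkpoint
// has been advanced to max_seq_no, ruling out ops-based recovery against the old
// history while keeping the commit self-consistent.
Map<String, String> userData = SegmentInfos.readLatestCommit(dir).getUserData();
String historyUUID = userData.get("history_uuid");        // freshly generated
long localCheckpoint = Long.parseLong(userData.get("local_checkpoint"));
long maxSeqNo = Long.parseLong(userData.get("max_seq_no"));
assert localCheckpoint == maxSeqNo;                        // invariant established above
```
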
| 1496 | + |
| 1497 | + /** |
| 1498 | + * Force-bakes the given translog UUID (with a reset generation) as recovery information in the Lucene |
| 1499 | + * index. This is used when recovering from a snapshot or during peer file-based recovery, where a new |
| 1500 | + * empty translog is created and the existing Lucene index needs to be changed to use it. |
| 1501 | + */ |
| 1502 | + public void associateIndexWithNewTranslog(final String translogUUID) throws IOException { |
| 1503 | + metadataLock.writeLock().lock(); |
| 1504 | + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { |
| 1505 | + if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) { |
| 1506 | + throw new IllegalArgumentException("a new translog uuid can't be equal to the existing one. got [" + translogUUID + "]"); |
| 1507 | + } |
| 1508 | + final Map<String, String> map = new HashMap<>(); |
| 1509 | + map.put(Translog.TRANSLOG_GENERATION_KEY, "1"); |
| 1510 | + map.put(Translog.TRANSLOG_UUID_KEY, translogUUID); |
| 1511 | + updateCommitData(writer, map); |
| 1512 | + } finally { |
| 1513 | + metadataLock.writeLock().unlock(); |
| 1514 | + } |
| 1515 | + } |
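
For context, a hedged sketch of the call flow this method is designed for during snapshot restore; `Translog.createEmptyTranslog` and its exact signature are an assumption here, not part of this change:

```java
// Assumed recovery flow (hypothetical helper and signature): create a fresh, empty
// translog on disk, then stamp its uuid (plus a reset generation) into the Lucene
// commit so the engine opens against the new translog rather than the old one.
final String translogUUID = Translog.createEmptyTranslog(
    shardPath.resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
store.associateIndexWithNewTranslog(translogUUID);
```
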
| 1516 | + |
| 1517 | + |
| 1518 | + /** |
| 1519 | + * Checks that the Lucene index contains a history uuid marker. If not, a new one is generated and committed. |
| 1520 | + */ |
| 1521 | + public void ensureIndexHasHistoryUUID() throws IOException { |
| 1522 | + metadataLock.writeLock().lock(); |
| 1523 | + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { |
| 1524 | + final Map<String, String> userData = getUserData(writer); |
| 1525 | + if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) { |
| 1526 | + updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID())); |
| 1527 | + } |
| 1528 | + } finally { |
| 1529 | + metadataLock.writeLock().unlock(); |
| 1530 | + } |
| 1531 | + } |
| 1532 | + |
| 1533 | + private void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException { |
| 1534 | + final Map<String, String> userData = getUserData(writer); |
| 1535 | + userData.putAll(keysToUpdate); |
| 1536 | + writer.setLiveCommitData(userData.entrySet()); |
| 1537 | + writer.commit(); |
| 1538 | + } |
| 1539 | + |
| 1540 | + private Map<String, String> getUserData(IndexWriter writer) { |
| 1541 | + final Map<String, String> userData = new HashMap<>(); |
| 1542 | + writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); |
| 1543 | + return userData; |
| 1544 | + } |
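
Note the copy in `getUserData` and the snapshot-then-`putAll` in `updateCommitData`: Lucene's `setLiveCommitData` replaces the writer's entire commit user-data map rather than merging into it, so any key not carried over (for example the translog uuid) would silently disappear from the next commit.
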
| 1545 | + |
| 1546 | + private IndexWriter newIndexWriter(IndexWriterConfig.OpenMode openMode, final Directory dir) throws IOException { |
| 1547 | + IndexWriterConfig iwc = new IndexWriterConfig(null) |
| 1548 | + .setCommitOnClose(false) |
| 1549 | + // we don't want merges to happen here - we call maybe merge on the engine |
| 1550 | + // later once we have started it up; otherwise we would need to wait for it here. |
| 1551 | + // we also don't specify a codec here; merges should use the engine's codec for this index |
| 1552 | + .setMergePolicy(NoMergePolicy.INSTANCE) |
| 1553 | + .setOpenMode(openMode); |
| 1554 | + return new IndexWriter(dir, iwc); |
| 1555 | + } |
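
One design choice worth calling out in `newIndexWriter`: with `setCommitOnClose(false)`, `IndexWriter.close()` behaves like `rollback()` and never produces an implicit commit, which is why every mutation above ends with an explicit `writer.commit()`. A minimal fragment, assuming a `Directory dir` and `java.util.Collections` in scope:

```java
// With commitOnClose disabled, close() discards pending changes instead of committing them.
IndexWriterConfig iwc = new IndexWriterConfig(null).setCommitOnClose(false);
try (IndexWriter writer = new IndexWriter(dir, iwc)) {
    writer.setLiveCommitData(Collections.singletonMap("k", "v").entrySet());
    // no writer.commit() here -> the live commit data above never reaches disk
}
```
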
| 1556 | + |
1457 | 1557 | } |