From 2183671cd5a37cee774cd75218f74d4d9c43cb59 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 9 Jul 2019 08:29:09 +0200 Subject: [PATCH 1/2] Some Cleanup in o.e.i.shard * Extract one duplicated method * Cleanup obviously unused code --- .../index/cache/bitset/BitsetFilterCache.java | 28 ++++++------ .../shard/AbstractIndexShardComponent.java | 7 --- .../index/shard/ElasticsearchMergePolicy.java | 2 +- .../elasticsearch/index/shard/IndexShard.java | 44 +++++++------------ .../shard/IndexShardClosedException.java | 4 -- .../index/shard/InternalIndexingStats.java | 5 --- .../index/shard/PrimaryReplicaSyncer.java | 10 ++--- .../RemoveCorruptedLuceneSegmentsAction.java | 4 +- .../RemoveCorruptedShardDataCommand.java | 8 ++-- .../elasticsearch/index/shard/ShardPath.java | 5 ++- .../index/shard/ShardSplittingQuery.java | 13 +----- .../index/shard/ShardStateMetaData.java | 10 ++--- .../index/shard/StoreRecovery.java | 24 +++++----- .../index/engine/TranslogHandler.java | 3 +- 14 files changed, 64 insertions(+), 103 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index b28f069b2ebc7..ed8699647af1f 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -91,6 +91,19 @@ public BitsetFilterCache(IndexSettings indexSettings, Listener listener) { this.listener = listener; } + public static BitSet setupBitset(LeafReaderContext context, Query query) throws IOException { + final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); + final IndexSearcher searcher = new IndexSearcher(topLevelContext); + searcher.setQueryCache(null); + final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + Scorer s = weight.scorer(context); + if (s == null) { + return null; + } else { + return BitSet.of(s.iterator(), context.reader().maxDoc()); + } + } + public IndexWarmer.Listener createListener(ThreadPool threadPool) { return new BitSetProducerWarmer(threadPool); } @@ -115,7 +128,7 @@ public void clear(String reason) { loadedFilters.invalidateAll(); } - private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException { + private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws ExecutionException { final IndexReader.CacheHelper cacheHelper = context.reader().getCoreCacheHelper(); if (cacheHelper == null) { throw new IllegalArgumentException("Reader " + context.reader() + " does not support caching"); @@ -133,18 +146,7 @@ private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext }); return filterToFbs.computeIfAbsent(query, key -> { - final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); - final IndexSearcher searcher = new IndexSearcher(topLevelContext); - searcher.setQueryCache(null); - final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); - Scorer s = weight.scorer(context); - final BitSet bitSet; - if (s == null) { - bitSet = null; - } else { - bitSet = BitSet.of(s.iterator(), context.reader().maxDoc()); - } - + final BitSet bitSet = setupBitset(context, query); Value value = new Value(bitSet, shardId); listener.onCache(shardId, value.bitset); return value; diff --git 
a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java index c967e94f7dae7..5f76c35128fde 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java +++ b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java @@ -20,14 +20,12 @@ package org.elasticsearch.index.shard; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.IndexSettings; public abstract class AbstractIndexShardComponent implements IndexShardComponent { protected final Logger logger; - protected final DeprecationLogger deprecationLogger; protected final ShardId shardId; protected final IndexSettings indexSettings; @@ -35,7 +33,6 @@ protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettin this.shardId = shardId; this.indexSettings = indexSettings; this.logger = Loggers.getLogger(getClass(), shardId); - this.deprecationLogger = new DeprecationLogger(logger); } @Override @@ -47,8 +44,4 @@ public ShardId shardId() { public IndexSettings indexSettings() { return indexSettings; } - - public String nodeName() { - return indexSettings.getNodeName(); - } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java b/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java index c6f28732b37c2..0905b3147dcff 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java @@ -46,7 +46,7 @@ */ public final class ElasticsearchMergePolicy extends FilterMergePolicy { - private static Logger logger = LogManager.getLogger(ElasticsearchMergePolicy.class); + private static final Logger logger = LogManager.getLogger(ElasticsearchMergePolicy.class); // True if the next merge request should do segment upgrades: private volatile boolean upgradeInProgress; diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 1cd5499866a23..92fd7c7e53968 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -40,7 +40,6 @@ import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; @@ -326,17 +325,15 @@ public IndexShard( this.pendingPrimaryTerm = primaryTerm; this.globalCheckpointListeners = new GlobalCheckpointListeners(shardId, threadPool.executor(ThreadPool.Names.LISTENER), threadPool.scheduler(), logger); - final ReplicationTracker replicationTracker = - new ReplicationTracker( - shardId, - aId, - indexSettings, - primaryTerm, - UNASSIGNED_SEQ_NO, - globalCheckpointListeners::globalCheckpointUpdated, - threadPool::absoluteTimeInMillis, - (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, retentionLeases, listener)); - this.replicationTracker = replicationTracker; + this.replicationTracker = new ReplicationTracker( + shardId, + aId, + indexSettings, + 
primaryTerm, + UNASSIGNED_SEQ_NO, + globalCheckpointListeners::globalCheckpointUpdated, + threadPool::absoluteTimeInMillis, + (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, retentionLeases, listener)); // the query cache is a node-level thing, however we want the most popular filters // to be computed on a per-shard basis @@ -586,7 +583,7 @@ public void onFailure(Exception e) { : "a started primary with non-pending operation term must be in primary mode " + this.shardRouting; shardStateUpdated.countDown(); } - if (currentRouting != null && currentRouting.active() == false && newRouting.active()) { + if (currentRouting.active() == false && newRouting.active()) { indexEventListener.afterIndexShardStarted(this); } if (newRouting.equals(currentRouting) == false) { @@ -631,8 +628,7 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta public void relocated(final String targetAllocationId, final Consumer consumer) throws IllegalIndexShardStateException, IllegalStateException, InterruptedException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; - final Releasable forceRefreshes = refreshListeners.forceRefreshes(); - try { + try (Releasable forceRefreshes = refreshListeners.forceRefreshes()) { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { forceRefreshes.close(); // no shard operation permits are being held here, move state from started to relocated @@ -665,8 +661,6 @@ public void relocated(final String targetAllocationId, final Consumer segments(boolean verbose) { return getEngine().segments(verbose); } - public void flushAndCloseEngine() throws IOException { - getEngine().flushAndClose(); - } - public String getHistoryUUID() { return getEngine().getHistoryUUID(); } @@ -2876,7 +2866,7 @@ protected void write(List>> candida } } }; - }; + } /** * Syncs the given location with the underlying storage unless already synced. 
This method might return immediately without @@ -2988,7 +2978,7 @@ private RefreshListeners buildRefreshListeners() { return new RefreshListeners( indexSettings::getMaxRefreshListeners, () -> refresh("too_many_listeners"), - threadPool.executor(ThreadPool.Names.LISTENER)::execute, + threadPool.executor(ThreadPool.Names.LISTENER), logger, threadPool.getThreadContext(), externalRefreshMetric); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java index 11d9804b1003f..61ea8eacd38c7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java @@ -28,10 +28,6 @@ public IndexShardClosedException(ShardId shardId) { super(shardId, IndexShardState.CLOSED, "Closed"); } - public IndexShardClosedException(ShardId shardId, Throwable t) { - super(shardId, IndexShardState.CLOSED, "Closed", t); - } - public IndexShardClosedException(ShardId shardId, String message) { super(shardId, IndexShardState.CLOSED, message); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java index a41bd36d812c7..182f5cd099cbd 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java @@ -176,10 +176,5 @@ IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) { deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count(), noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis)); } - - void clear() { - indexMetric.clear(); - deleteMetric.clear(); - } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 17ef424185d1f..f4cd1cdb8115e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -172,7 +172,7 @@ public void onFailure(Exception e) { } }; try { - new SnapshotSender(logger, syncAction, resyncTask, shardId, primaryAllocationId, primaryTerm, snapshot, chunkSize.bytesAsInt(), + new SnapshotSender(syncAction, resyncTask, shardId, primaryAllocationId, primaryTerm, snapshot, chunkSize.bytesAsInt(), startingSeqNo, maxSeqNo, maxSeenAutoIdTimestamp, wrappedListener).run(); } catch (Exception e) { wrappedListener.onFailure(e); @@ -200,12 +200,12 @@ static class SnapshotSender extends AbstractRunnable implements ActionListener listener) { - this.logger = logger; + this.logger = PrimaryReplicaSyncer.logger; this.syncAction = syncAction; this.task = task; this.shardId = shardId; @@ -232,7 +232,7 @@ public void onFailure(Exception e) { } } - private static Translog.Operation[] EMPTY_ARRAY = new Translog.Operation[0]; + private static final Translog.Operation[] EMPTY_ARRAY = new Translog.Operation[0]; @Override protected void doRun() throws Exception { diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java index da0257c19e334..b78de89bda3c6 100644 --- 
a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java @@ -33,8 +33,7 @@ */ public class RemoveCorruptedLuceneSegmentsAction { - public Tuple getCleanStatus(ShardPath shardPath, - Directory indexDirectory, + public Tuple getCleanStatus(Directory indexDirectory, Lock writeLock, PrintStream printStream, boolean verbose) throws IOException { @@ -62,7 +61,6 @@ public Tuple getCleanStatus } public void execute(Terminal terminal, - ShardPath shardPath, Directory indexDirectory, Lock writeLock, PrintStream printStream, diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index 5fc3ba57980bf..0fc4e20df6bfa 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -289,7 +289,7 @@ public void write(int b) { terminal.println("Opening Lucene index at " + indexPath); terminal.println(""); try { - indexCleanStatus = removeCorruptedLuceneSegmentsAction.getCleanStatus(shardPath, indexDir, + indexCleanStatus = removeCorruptedLuceneSegmentsAction.getCleanStatus(indexDir, writeIndexLock, printStream, verbose); } catch (Exception e) { terminal.println(e.getMessage()); @@ -355,7 +355,7 @@ public void write(int b) { confirm("Continue and remove corrupted data from the shard ?", terminal); if (indexStatus != CleanStatus.CLEAN) { - removeCorruptedLuceneSegmentsAction.execute(terminal, shardPath, indexDir, + removeCorruptedLuceneSegmentsAction.execute(terminal, indexDir, writeIndexLock, printStream, verbose); } @@ -373,7 +373,7 @@ public void write(int b) { // newHistoryCommit obtains its own lock addNewHistoryCommit(indexDir, terminal, translogStatus != CleanStatus.CLEAN); - newAllocationId(environment, shardPath, terminal); + newAllocationId(shardPath, terminal); if (indexStatus != CleanStatus.CLEAN) { dropCorruptMarkerFiles(terminal, indexPath, indexDir, indexStatus == CleanStatus.CLEAN_WITH_CORRUPTED_MARKER); } @@ -425,7 +425,7 @@ protected void addNewHistoryCommit(Directory indexDirectory, Terminal terminal, } } - protected void newAllocationId(Environment environment, ShardPath shardPath, Terminal terminal) throws IOException { + private void newAllocationId(ShardPath shardPath, Terminal terminal) throws IOException { final Path shardStatePath = shardPath.getShardStatePath(); final ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardStatePath); diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 32d38d9803414..ac865704d51bb 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -34,6 +34,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Map; +import java.util.Objects; public final class ShardPath { public static final String INDEX_FOLDER_NAME = "index"; @@ -283,10 +284,10 @@ public boolean equals(Object o) { return false; } final ShardPath shardPath = (ShardPath) o; - if (shardId != null ? 
!shardId.equals(shardPath.shardId) : shardPath.shardId != null) { + if (Objects.equals(shardId, shardPath.shardId) == false) { return false; } - if (path != null ? !path.equals(shardPath.path) : shardPath.path != null) { + if (Objects.equals(path, shardPath.path) == false) { return false; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index 4082293d9f2eb..003e73b12e163 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -19,11 +19,9 @@ package org.elasticsearch.index.shard; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; -import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -44,6 +42,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.Uid; @@ -339,15 +338,7 @@ public float matchCost() { * executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either. */ private static BitSetProducer newParentDocBitSetProducer() { - return context -> { - Query query = Queries.newNonNestedFilter(); - final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); - final IndexSearcher searcher = new IndexSearcher(topLevelContext); - searcher.setQueryCache(null); - final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); - Scorer s = weight.scorer(context); - return s == null ? null : BitSet.of(s.iterator(), context.reader().maxDoc()); - }; + return context -> BitsetFilterCache.setupBitset(context, Queries.newNonNestedFilter()); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java index 5d4a093cc4ba2..bd54db38d0792 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java @@ -30,12 +30,12 @@ import java.io.IOException; import java.io.OutputStream; +import java.util.Objects; public final class ShardStateMetaData { private static final String SHARD_STATE_FILE_PREFIX = "state-"; private static final String PRIMARY_KEY = "primary"; - private static final String VERSION_KEY = "version"; // for pre-5.0 shards that have not yet been active private static final String INDEX_UUID_KEY = "index_uuid"; private static final String ALLOCATION_ID_KEY = "allocation_id"; @@ -65,10 +65,10 @@ public boolean equals(Object o) { if (primary != that.primary) { return false; } - if (indexUUID != null ? !indexUUID.equals(that.indexUUID) : that.indexUUID != null) { + if (indexUUID.equals(that.indexUUID) == false) { return false; } - if (allocationId != null ? 
!allocationId.equals(that.allocationId) : that.allocationId != null) { + if (Objects.equals(allocationId, that.allocationId) == false) { return false; } @@ -77,7 +77,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - int result = (indexUUID != null ? indexUUID.hashCode() : 0); + int result = indexUUID.hashCode(); result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0); result = 31 * result + (primary ? 1 : 0); return result; @@ -125,8 +125,6 @@ public ShardStateMetaData fromXContent(XContentParser parser) throws IOException primary = parser.booleanValue(); } else if (INDEX_UUID_KEY.equals(currentFieldName)) { indexUUID = parser.text(); - } else if (VERSION_KEY.equals(currentFieldName)) { - // ES versions before 6.0 wrote this for legacy reasons, just ignore for now and remove in 7.0 } else { throw new CorruptStateException("unexpected field in shard state [" + currentFieldName + "]"); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index fae3703027f9e..7e3a6c33d1c70 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -99,7 +99,7 @@ boolean recoverFromStore(final IndexShard indexShard) { } boolean recoverFromLocalShards(BiConsumer mappingUpdateConsumer, - final IndexShard indexShard, final List shards) throws IOException { + final IndexShard indexShard, final List shards) { if (canRecover(indexShard)) { RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); assert recoveryType == RecoverySource.Type.LOCAL_SHARDS: "expected local shards recovery type: " + recoveryType; @@ -393,12 +393,7 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe recoveryState.getIndex().updateVersion(version); if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { assert indexShouldExists; - store.bootstrapNewHistory(); - final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); - final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); - final String translogUUID = Translog.createEmptyTranslog( - indexShard.shardPath().resolveTranslog(), localCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); - store.associateIndexWithNewTranslog(translogUUID); + bootstrap(indexShard, store); writeEmptyRetentionLeasesFile(indexShard); } else if (indexShouldExists) { if (recoveryState.getRecoverySource().shouldBootstrapNewHistoryUUID()) { @@ -472,12 +467,7 @@ private void restore(final IndexShard indexShard, final Repository repository, f repository.restoreShard(indexShard.store(), restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); final Store store = indexShard.store(); - store.bootstrapNewHistory(); - final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); - final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); - final String translogUUID = Translog.createEmptyTranslog( - indexShard.shardPath().resolveTranslog(), localCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); - store.associateIndexWithNewTranslog(translogUUID); + bootstrap(indexShard, store); assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; 
writeEmptyRetentionLeasesFile(indexShard); indexShard.openEngineAndRecoverFromTranslog(); @@ -489,4 +479,12 @@ private void restore(final IndexShard indexShard, final Repository repository, f } } + private void bootstrap(final IndexShard indexShard, final Store store) throws IOException { + store.bootstrapNewHistory(); + final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), localCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); + store.associateIndexWithNewTranslog(translogUUID); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 8e8b4687844b2..44adb9958632b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -123,8 +123,7 @@ private Engine.Operation convertToEngineOp(Translog.Operation operation, Engine. final Translog.Index index = (Translog.Index) operation; final String indexName = mapperService.index().getName(); final Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), - mapperService.getIndexSettings().getIndexVersionCreated(), - new SourceToParse(indexName, index.type(), index.id(), index.source(), XContentHelper.xContentType(index.source()), + new SourceToParse(indexName, index.type(), index.id(), index.source(), XContentHelper.xContentType(index.source()), index.routing()), index.seqNo(), index.primaryTerm(), index.version(), null, origin, index.getAutoGeneratedIdTimestamp(), true, SequenceNumbers.UNASSIGNED_SEQ_NO, 0); return engineIndex; From a5a2f0b82d7352fe06c0c1238f406d74ee63a932 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 9 Jul 2019 16:12:21 +0200 Subject: [PATCH 2/2] CR: routing is never null + renaming --- .../elasticsearch/index/cache/bitset/BitsetFilterCache.java | 4 ++-- .../main/java/org/elasticsearch/index/shard/IndexShard.java | 5 +++-- .../org/elasticsearch/index/shard/ShardSplittingQuery.java | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index ed8699647af1f..73053e6371362 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -91,7 +91,7 @@ public BitsetFilterCache(IndexSettings indexSettings, Listener listener) { this.listener = listener; } - public static BitSet setupBitset(LeafReaderContext context, Query query) throws IOException { + public static BitSet bitsetFromQuery(Query query, LeafReaderContext context) throws IOException { final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); @@ -146,7 +146,7 @@ private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext }); return filterToFbs.computeIfAbsent(query, key -> { - final BitSet bitSet = setupBitset(context, query); + final BitSet bitSet = bitsetFromQuery(query, context); Value value = new Value(bitSet, shardId); 
listener.onCache(shardId, value.bitset); return value; diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 92fd7c7e53968..3a519390a0dc5 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -440,16 +440,17 @@ public void updateShardState(final ShardRouting newRouting, final ShardRouting currentRouting; synchronized (mutex) { currentRouting = this.shardRouting; + assert currentRouting != null; if (!newRouting.shardId().equals(shardId())) { throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId()); } - if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) { + if (newRouting.isSameAllocation(currentRouting) == false) { throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting); } - if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) { + if (currentRouting.primary() && newRouting.primary() == false) { throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. Current " + currentRouting + ", new " + newRouting); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index 003e73b12e163..cde87fe69f04f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -338,7 +338,7 @@ public float matchCost() { * executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either. */ private static BitSetProducer newParentDocBitSetProducer() { - return context -> BitsetFilterCache.setupBitset(context, Queries.newNonNestedFilter()); + return context -> BitsetFilterCache.bitsetFromQuery(Queries.newNonNestedFilter(), context); } }
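
Editorial note, not part of the patches above: the duplicated logic that PATCH 1/2 extracts out of BitsetFilterCache#getAndLoadIfNotPresent and ShardSplittingQuery#newParentDocBitSetProducer, and that PATCH 2/2 renames to bitsetFromQuery(Query, LeafReaderContext), boils down to the standalone sketch below. The method body is taken from the diff; the wrapper class, imports, and comments are added here only for illustration and assume Lucene 8.x APIs.

    import java.io.IOException;

    import org.apache.lucene.index.IndexReaderContext;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.ReaderUtil;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;
    import org.apache.lucene.util.BitSet;

    public final class BitsetHelper {

        /**
         * Builds a BitSet over the given leaf containing every document that matches
         * the query, or returns null when no document in the leaf matches.
         */
        public static BitSet bitsetFromQuery(Query query, LeafReaderContext context) throws IOException {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            // Use a throw-away searcher with query caching disabled: the callers either
            // cache the resulting bitset themselves (BitsetFilterCache) or deliberately
            // avoid caching it (ShardSplittingQuery).
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
            final Scorer s = weight.scorer(context);
            return s == null ? null : BitSet.of(s.iterator(), context.reader().maxDoc());
        }
    }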
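
Likewise, the two copies of the translog bootstrap sequence in StoreRecovery (local-shards recovery and snapshot restore) are folded into a single private bootstrap(IndexShard, Store) helper. The code below is the helper as it appears in the diff; the comments are editorial annotations, not part of the patch.

    private void bootstrap(final IndexShard indexShard, final Store store) throws IOException {
        // Start a new history for the index (fresh history UUID in the commit user data).
        store.bootstrapNewHistory();
        // Read back the local checkpoint recorded in the last commit's user data.
        final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
        final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
        // Create a brand-new, empty translog seeded with that checkpoint ...
        final String translogUUID = Translog.createEmptyTranslog(
            indexShard.shardPath().resolveTranslog(), localCheckpoint, shardId, indexShard.getPendingPrimaryTerm());
        // ... and point the index commit at the new translog.
        store.associateIndexWithNewTranslog(translogUUID);
    }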