From a5504980c3a977db34eb4a3a361b0d4582079c07 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 23 Nov 2016 09:42:39 -0500 Subject: [PATCH 01/17] Add primary term to doc write response This commit adds the primary term to the doc write response. --- .../action/DocWriteResponse.java | 27 +++++++- .../action/bulk/TransportShardBulkAction.java | 64 ++++++++++------- .../action/delete/DeleteResponse.java | 6 +- .../action/index/IndexResponse.java | 7 +- .../action/update/TransportUpdateAction.java | 6 +- .../action/update/UpdateResponse.java | 11 +-- .../elasticsearch/index/engine/Engine.java | 45 +++++++----- .../index/engine/InternalEngine.java | 68 ++++++++----------- .../action/DocWriteResponseTests.java | 4 ++ .../action/bulk/BulkRequestModifierTests.java | 2 +- .../bulk/TransportShardBulkActionTests.java | 29 ++++---- .../AsyncBulkByScrollActionTests.java | 11 +-- .../action/delete/DeleteResponseTests.java | 12 ++-- .../action/index/IndexRequestTests.java | 3 +- .../action/index/IndexResponseTests.java | 12 ++-- .../action/update/UpdateResponseTests.java | 13 ++-- .../ESIndexLevelReplicationTestCase.java | 1 + .../shard/IndexingOperationListenerTests.java | 4 +- .../index/translog/TranslogTests.java | 5 +- .../recovery/RecoverySourceHandlerTests.java | 4 +- docs/reference/docs/bulk.asciidoc | 14 ++-- docs/reference/docs/index_.asciidoc | 2 + docs/reference/getting-started.asciidoc | 5 +- docs/reference/ingest/ingest-node.asciidoc | 3 +- .../query-dsl/percolate-query.asciidoc | 3 +- 25 files changed, 214 insertions(+), 147 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index c3479ecc0cfff..720ac9c8d5bcf 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -57,6 +57,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr 
private static final String _ID = "_id"; private static final String _VERSION = "_version"; private static final String _SEQ_NO = "_seq_no"; + private static final String _PRIMARY_TERM = "_primary_term"; private static final String RESULT = "result"; private static final String FORCED_REFRESH = "forced_refresh"; @@ -116,14 +117,16 @@ public void writeTo(StreamOutput out) throws IOException { private String type; private long version; private long seqNo; + private long primaryTerm; private boolean forcedRefresh; protected Result result; - public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, long version, Result result) { + public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { this.shardId = shardId; this.type = type; this.id = id; this.seqNo = seqNo; + this.primaryTerm = primaryTerm; this.version = version; this.result = result; } @@ -182,6 +185,15 @@ public long getSeqNo() { return seqNo; } + /** + * The primary term for this change. + * + * @return the primary term + */ + public long getPrimaryTerm() { + return primaryTerm; + } + /** * Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to * {@link RefreshPolicy#IMMEDIATE} will always return true for this. 
Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will @@ -251,8 +263,10 @@ public void readFrom(StreamInput in) throws IOException { version = in.readZLong(); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { seqNo = in.readZLong(); + primaryTerm = in.readVLong(); } else { seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + primaryTerm = 0; } forcedRefresh = in.readBoolean(); result = Result.readFrom(in); @@ -267,6 +281,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(version); if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { out.writeZLong(seqNo); + out.writeVLong(primaryTerm); } out.writeBoolean(forcedRefresh); result.writeTo(out); @@ -293,6 +308,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(_SHARDS, shardInfo); if (getSeqNo() >= 0) { builder.field(_SEQ_NO, getSeqNo()); + builder.field(_PRIMARY_TERM, getPrimaryTerm()); } return builder; } @@ -333,7 +349,9 @@ protected static void parseInnerToXContent(XContentParser parser, Builder contex context.setForcedRefresh(parser.booleanValue()); } else if (_SEQ_NO.equals(currentFieldName)) { context.setSeqNo(parser.longValue()); - } else { + } else if (_PRIMARY_TERM.equals(currentFieldName)) { + context.setPrimaryTerm(parser.longValue()); + } else { throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { @@ -362,6 +380,7 @@ public abstract static class Builder { protected boolean forcedRefresh; protected ShardInfo shardInfo = null; protected Long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + protected Long primaryTerm = 0L; public ShardId getShardId() { return shardId; @@ -407,6 +426,10 @@ public void setSeqNo(Long seqNo) { this.seqNo = seqNo; } + public void setPrimaryTerm(Long primaryTerm) { + this.primaryTerm = primaryTerm; + } + public abstract DocWriteResponse build(); } } diff --git 
a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 697f4c2f9938e..d82bc1caab288 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -139,7 +139,7 @@ private static BulkItemResultHolder executeIndexRequest(final IndexRequest index return new BulkItemResultHolder(null, indexResult, bulkItemRequest); } else { IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), - indexResult.getSeqNo(), indexResult.getVersion(), indexResult.isCreated()); + indexResult.getSeqNo(), indexResult.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); return new BulkItemResultHolder(response, indexResult, bulkItemRequest); } } @@ -152,7 +152,7 @@ private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest del return new BulkItemResultHolder(null, deleteResult, bulkItemRequest); } else { DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(), - deleteResult.getSeqNo(), deleteResult.getVersion(), deleteResult.isFound()); + deleteResult.getSeqNo(), deleteResult.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); return new BulkItemResultHolder(response, deleteResult, bulkItemRequest); } } @@ -272,7 +272,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq int requestIndex, UpdateHelper updateHelper, LongSupplier nowInMillis, final MappingUpdatePerformer mappingUpdater) throws Exception { - Engine.Result updateOperationResult = null; + Engine.Result result = null; UpdateResponse updateResponse = null; BulkItemRequest replicaRequest = request.items()[requestIndex]; int maxAttempts = updateRequest.retryOnConflict(); @@ -284,7 +284,7 @@ private static BulkItemResultHolder 
executeUpdateRequest(UpdateRequest updateReq } catch (Exception failure) { // we may fail translating a update to index or delete operation // we use index result to communicate failure while translating update request - updateOperationResult = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); + result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); break; // out of retry loop } // execute translated update request @@ -294,34 +294,41 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq IndexRequest indexRequest = translate.action(); MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); indexRequest.process(mappingMd, request.index()); - updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater); + result = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater); break; case DELETED: DeleteRequest deleteRequest = translate.action(); - updateOperationResult = executeDeleteRequestOnPrimary(deleteRequest, primary); + result = executeDeleteRequestOnPrimary(deleteRequest, primary); break; case NOOP: primary.noopUpdate(updateRequest.type()); break; default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); } - if (updateOperationResult == null) { + if (result == null) { // this is a noop operation updateResponse = translate.action(); break; // out of retry loop - } else if (updateOperationResult.hasFailure() == false) { + } else if (result.hasFailure() == false) { // enrich update response and // set translated update (index/delete) request for replica execution in bulk items - switch (updateOperationResult.getOperationType()) { + switch (result.getOperationType()) { case INDEX: + assert result instanceof Engine.IndexResult : result.getClass(); IndexRequest updateIndexRequest = translate.action(); - final IndexResponse 
indexResponse = new IndexResponse(primary.shardId(), - updateIndexRequest.type(), updateIndexRequest.id(), updateOperationResult.getSeqNo(), - updateOperationResult.getVersion(), ((Engine.IndexResult) updateOperationResult).isCreated()); + final IndexResponse indexResponse = + new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), + result.getVersion(), ((Engine.IndexResult) result).isCreated()); BytesReference indexSourceAsBytes = updateIndexRequest.source(); - updateResponse = new UpdateResponse(indexResponse.getShardInfo(), - indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), - indexResponse.getVersion(), indexResponse.getResult()); + updateResponse = new UpdateResponse( + indexResponse.getShardInfo(), + indexResponse.getShardId(), + indexResponse.getType(), + indexResponse.getId(), + indexResponse.getSeqNo(), + indexResponse.getPrimaryTerm(), + indexResponse.getVersion(), + indexResponse.getResult()); if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || (updateRequest.fields() != null && updateRequest.fields().length > 0)) { Tuple> sourceAndContent = @@ -333,13 +340,20 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateIndexRequest); break; case DELETE: + assert result instanceof Engine.DeleteResult : result.getClass(); DeleteRequest updateDeleteRequest = translate.action(); DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), - updateDeleteRequest.type(), updateDeleteRequest.id(), updateOperationResult.getSeqNo(), - updateOperationResult.getVersion(), ((Engine.DeleteResult) updateOperationResult).isFound()); - updateResponse = new UpdateResponse(deleteResponse.getShardInfo(), - deleteResponse.getShardId(), deleteResponse.getType(), deleteResponse.getId(), 
deleteResponse.getSeqNo(), - deleteResponse.getVersion(), deleteResponse.getResult()); + updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), + result.getVersion(), ((Engine.DeleteResult) result).isFound()); + updateResponse = new UpdateResponse( + deleteResponse.getShardInfo(), + deleteResponse.getShardId(), + deleteResponse.getType(), + deleteResponse.getId(), + deleteResponse.getSeqNo(), + deleteResponse.getPrimaryTerm(), + deleteResponse.getVersion(), + deleteResponse.getResult()); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), deleteResponse.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null)); @@ -347,15 +361,15 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest); break; } - assert updateOperationResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO; + assert result.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO; // successful operation break; // out of retry loop - } else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) { + } else if (result.getFailure() instanceof VersionConflictEngineException == false) { // not a version conflict exception break; // out of retry loop } } - return new BulkItemResultHolder(updateResponse, updateOperationResult, replicaRequest); + return new BulkItemResultHolder(updateResponse, result, replicaRequest); } static boolean shouldExecuteReplicaItem(final BulkItemRequest request, final int index) { @@ -496,7 +510,7 @@ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest reque mappingUpdater.updateMappings(mappingUpdate, primary.shardId(), request.type()); } } catch (MapperParsingException | IllegalArgumentException failure) { - return new Engine.IndexResult(failure, request.version()); + return new 
Engine.IndexResult(failure, request.version(), primary.getPrimaryTerm()); } // Verify that there are no more mappings that need to be applied. If there are failures, a @@ -509,7 +523,7 @@ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest reque } catch (MapperParsingException | IllegalStateException e) { // there was an error in parsing the document that was not because // of pending mapping updates, so return a failure for the result - return new Engine.IndexResult(e, request.version()); + return new Engine.IndexResult(e, request.version(), primary.getPrimaryTerm()); } } else { // There was no mapping update, the operation is the same as the pre-update version. diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java index 3680d09d39b2a..1e42537395f7b 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -42,8 +42,8 @@ public class DeleteResponse extends DocWriteResponse { public DeleteResponse() { } - public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long version, boolean found) { - super(shardId, type, id, seqNo, version, found ? Result.DELETED : Result.NOT_FOUND); + public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean found) { + super(shardId, type, id, seqNo, primaryTerm, version, found ? 
Result.DELETED : Result.NOT_FOUND); } @Override @@ -112,7 +112,7 @@ public void setFound(boolean found) { @Override public DeleteResponse build() { - DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, version, found); + DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version, found); deleteResponse.setForcedRefresh(forcedRefresh); if (shardInfo != null) { deleteResponse.setShardInfo(shardInfo); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 6310a2aac1868..f3b71d590ff88 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -43,8 +43,8 @@ public class IndexResponse extends DocWriteResponse { public IndexResponse() { } - public IndexResponse(ShardId shardId, String type, String id, long seqNo, long version, boolean created) { - super(shardId, type, id, seqNo, version, created ? Result.CREATED : Result.UPDATED); + public IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean created) { + super(shardId, type, id, seqNo, primaryTerm, version, created ? 
Result.CREATED : Result.UPDATED); } @Override @@ -62,6 +62,7 @@ public String toString() { builder.append(",version=").append(getVersion()); builder.append(",result=").append(getResult().getLowercase()); builder.append(",seqNo=").append(getSeqNo()); + builder.append(",primaryTerm=").append(getPrimaryTerm()); builder.append(",shards=").append(Strings.toString(getShardInfo())); return builder.append("]").toString(); } @@ -114,7 +115,7 @@ public void setCreated(boolean created) { @Override public IndexResponse build() { - IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, version, created); + IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version, created); indexResponse.setForcedRefresh(forcedRefresh); if (shardInfo != null) { indexResponse.setShardInfo(shardInfo); diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 67d62113062a7..189803f818fcd 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -179,7 +179,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< final BytesReference upsertSourceBytes = upsertRequest.source(); bulkAction.execute(toSingleItemBulkRequest(upsertRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || 
(request.fields() != null && request.fields().length > 0)) { Tuple> sourceAndContent = @@ -200,7 +200,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< final BytesReference indexSourceBytes = indexRequest.source(); bulkAction.execute(toSingleItemBulkRequest(indexRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); @@ -211,7 +211,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< DeleteRequest deleteRequest = result.action(); bulkAction.execute(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); diff 
--git a/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java index 6f736a024eb39..672b190d91130 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java @@ -47,11 +47,12 @@ public UpdateResponse() { * For example: update script with operation set to none */ public UpdateResponse(ShardId shardId, String type, String id, long version, Result result) { - this(new ShardInfo(0, 0), shardId, type, id, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, result); + this(new ShardInfo(0, 0), shardId, type, id, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, version, result); } - public UpdateResponse(ShardInfo shardInfo, ShardId shardId, String type, String id, long seqNo, long version, Result result) { - super(shardId, type, id, seqNo, version, result); + public UpdateResponse( + ShardInfo shardInfo, ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { + super(shardId, type, id, seqNo, primaryTerm, version, result); setShardInfo(shardInfo); } @@ -106,6 +107,8 @@ public String toString() { builder.append(",type=").append(getType()); builder.append(",id=").append(getId()); builder.append(",version=").append(getVersion()); + builder.append(",seqNo=").append(getSeqNo()); + builder.append(",primaryTerm=").append(getPrimaryTerm()); builder.append(",result=").append(getResult().getLowercase()); builder.append(",shards=").append(getShardInfo()); return builder.append("]").toString(); @@ -154,7 +157,7 @@ public void setGetResult(GetResult getResult) { public UpdateResponse build() { UpdateResponse update; if (shardInfo != null && seqNo != null) { - update = new UpdateResponse(shardInfo, shardId, type, id, seqNo, version, result); + update = new UpdateResponse(shardInfo, shardId, type, id, seqNo, primaryTerm, version, result); } else { update = 
new UpdateResponse(shardId, type, id, version, result); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 59655abf2894c..342fd0d3dc5d8 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -307,20 +307,22 @@ public abstract static class Result { private final Operation.TYPE operationType; private final long version; private final long seqNo; + private final long primaryTerm; private final Exception failure; private final SetOnce freeze = new SetOnce<>(); private Translog.Location translogLocation; private long took; - protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) { + protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo, long primaryTerm) { this.operationType = operationType; this.failure = failure; this.version = version; this.seqNo = seqNo; + this.primaryTerm = primaryTerm; } - protected Result(Operation.TYPE operationType, long version, long seqNo) { - this(operationType, null, version, seqNo); + protected Result(Operation.TYPE operationType, long version, long seqNo, long primaryTerm) { + this(operationType, null, version, seqNo, primaryTerm); } /** whether the operation had failure */ @@ -342,6 +344,15 @@ public long getSeqNo() { return seqNo; } + /** + * Get the primary term. 
+ * + * @return the primary term + */ + public long getPrimaryTerm() { + return primaryTerm; + } + /** get the translog location after executing the operation */ public Translog.Location getTranslogLocation() { return translogLocation; @@ -388,8 +399,8 @@ public static class IndexResult extends Result { private final boolean created; - public IndexResult(long version, long seqNo, boolean created) { - super(Operation.TYPE.INDEX, version, seqNo); + public IndexResult(long version, long seqNo, long primaryTerm, boolean created) { + super(Operation.TYPE.INDEX, version, seqNo, primaryTerm); this.created = created; } @@ -397,12 +408,12 @@ public IndexResult(long version, long seqNo, boolean created) { * use in case of index operation failed before getting to internal engine * (e.g while preparing operation or updating mappings) * */ - public IndexResult(Exception failure, long version) { - this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO); + public IndexResult(Exception failure, long version, long primaryTerm) { + this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm); } - public IndexResult(Exception failure, long version, long seqNo) { - super(Operation.TYPE.INDEX, failure, version, seqNo); + public IndexResult(Exception failure, long version, long seqNo, long primaryTerm) { + super(Operation.TYPE.INDEX, failure, version, seqNo, primaryTerm); this.created = false; } @@ -416,13 +427,13 @@ public static class DeleteResult extends Result { private final boolean found; - public DeleteResult(long version, long seqNo, boolean found) { - super(Operation.TYPE.DELETE, version, seqNo); + public DeleteResult(long version, long seqNo, long primaryTerm, boolean found) { + super(Operation.TYPE.DELETE, version, seqNo, primaryTerm); this.found = found; } - public DeleteResult(Exception failure, long version, long seqNo, boolean found) { - super(Operation.TYPE.DELETE, failure, version, seqNo); + public DeleteResult(Exception failure, long version, 
long seqNo, long primaryTerm, boolean found) { + super(Operation.TYPE.DELETE, failure, version, seqNo, primaryTerm); this.found = found; } @@ -434,12 +445,12 @@ public boolean isFound() { static class NoOpResult extends Result { - NoOpResult(long seqNo) { - super(Operation.TYPE.NO_OP, 0, seqNo); + NoOpResult(long seqNo, long primaryTerm) { + super(Operation.TYPE.NO_OP, 0, seqNo, primaryTerm); } - NoOpResult(long seqNo, Exception failure) { - super(Operation.TYPE.NO_OP, failure, 0, seqNo); + NoOpResult(long seqNo, long primaryTerm, Exception failure) { + super(Operation.TYPE.NO_OP, failure, 0, seqNo, primaryTerm); } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 0bed51e0e24a1..77f6e3dd98300 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -611,8 +611,8 @@ public IndexResult index(Index index) throws IOException { } else if (plan.indexIntoLucene) { indexResult = indexIntoLucene(index, plan); } else { - indexResult = new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, - plan.currentNotFoundOrDeleted); + indexResult = new IndexResult( + plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); } if (indexResult.hasFailure() == false && index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { @@ -698,10 +698,9 @@ private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { } if (index.versionType().isVersionConflictForWrites( currentVersion, index.version(), currentNotFoundOrDeleted)) { - plan = IndexingStrategy.skipDueToVersionConflict( - new VersionConflictEngineException(shardId, index, currentVersion, - currentNotFoundOrDeleted), - currentNotFoundOrDeleted, currentVersion); + final VersionConflictEngineException e = + new VersionConflictEngineException(shardId, 
index, currentVersion, currentNotFoundOrDeleted); + plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, index.primaryTerm()); } else { plan = IndexingStrategy.processNormally(currentNotFoundOrDeleted, seqNoService().generateSeqNo(), @@ -733,7 +732,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } versionMap.putUnderLock(index.uid().bytes(), new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); - return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); + return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { /* There is no tragic event recorded so this must be a document failure. @@ -822,12 +821,11 @@ static IndexingStrategy optimizedAppendOnly(long seqNoForIndexing) { return new IndexingStrategy(true, false, true, seqNoForIndexing, 1, null); } - static IndexingStrategy skipDueToVersionConflict(VersionConflictEngineException e, - boolean currentNotFoundOrDeleted, - long currentVersion) { - return new IndexingStrategy(currentNotFoundOrDeleted, false, - false, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, - new IndexResult(e, currentVersion)); + static IndexingStrategy skipDueToVersionConflict( + VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long primaryTerm) { + final IndexResult result = new IndexResult(e, currentVersion, primaryTerm); + return new IndexingStrategy( + currentNotFoundOrDeleted, false, false, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); } static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, @@ -897,8 +895,8 @@ public DeleteResult delete(Delete delete) throws IOException { } else if (plan.deleteFromLucene) { deleteResult = deleteInLucene(delete, plan); } else { 
- deleteResult = new DeleteResult(plan.versionOfDeletion, plan.seqNoOfDeletion, - plan.currentlyDeleted == false); + deleteResult = new DeleteResult( + plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); } if (!deleteResult.hasFailure() && delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { @@ -970,9 +968,8 @@ private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException } final DeletionStrategy plan; if (delete.versionType().isVersionConflictForWrites(currentVersion, delete.version(), currentlyDeleted)) { - plan = DeletionStrategy.skipDueToVersionConflict( - new VersionConflictEngineException(shardId, delete, currentVersion, currentlyDeleted), - currentVersion, currentlyDeleted); + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete, currentVersion, currentlyDeleted); + plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, delete.primaryTerm(), currentlyDeleted); } else { plan = DeletionStrategy.processNormally(currentlyDeleted, seqNoService().generateSeqNo(), @@ -993,12 +990,12 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( - plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); + plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { // there is no tragic event and such it must be a document level failure - return new DeleteResult(ex, plan.versionOfDeletion, plan.versionOfDeletion, - plan.currentlyDeleted == false); + return new DeleteResult( + ex, plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); } else { throw ex; } @@ -1028,26 +1025,21 @@ 
private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, Optional.empty() : Optional.of(earlyResultOnPreflightError); } - static DeletionStrategy skipDueToVersionConflict(VersionConflictEngineException e, - long currentVersion, boolean currentlyDeleted) { - return new DeletionStrategy(false, currentlyDeleted, - SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, - new DeleteResult(e, currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, - currentlyDeleted == false)); + static DeletionStrategy skipDueToVersionConflict( + VersionConflictEngineException e, long currentVersion, long primaryTerm, boolean currentlyDeleted) { + final DeleteResult deleteResult = + new DeleteResult(e, currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, currentlyDeleted == false); + return new DeletionStrategy( + false, currentlyDeleted, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, deleteResult); } - static DeletionStrategy processNormally(boolean currentlyDeleted, - long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(true, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, - null); + static DeletionStrategy processNormally(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { + return new DeletionStrategy(true, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); } - public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, - long seqNoOfDeletion, - long versionOfDeletion) { - return new DeletionStrategy(false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, - null); + public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { + return new DeletionStrategy(false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); } } @@ -1065,7 +1057,7 @@ public NoOpResult noOp(final NoOp noOp) { try (ReleasableLock ignored = readLock.acquire()) { noOpResult = innerNoOp(noOp); } 
catch (final Exception e) { - noOpResult = new NoOpResult(noOp.seqNo(), e); + noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm(), e); } return noOpResult; } @@ -1074,7 +1066,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { assert noOp.seqNo() > SequenceNumbersService.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try { - final NoOpResult noOpResult = new NoOpResult(noOp.seqNo()); + final NoOpResult noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm()); final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); noOpResult.setTook(System.nanoTime() - noOp.startTime()); diff --git a/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java b/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java index 52eb8a82743c6..bb1f2d2a637f5 100644 --- a/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java @@ -43,6 +43,7 @@ public void testGetLocation() { "type", "id", SequenceNumbersService.UNASSIGNED_SEQ_NO, + 17, 0, Result.CREATED) {}; assertEquals("/index/type/id", response.getLocation(null)); @@ -56,6 +57,7 @@ public void testGetLocationNonAscii() { "type", "❤", SequenceNumbersService.UNASSIGNED_SEQ_NO, + 17, 0, Result.CREATED) {}; assertEquals("/index/type/%E2%9D%A4", response.getLocation(null)); @@ -69,6 +71,7 @@ public void testGetLocationWithSpaces() { "type", "a b", SequenceNumbersService.UNASSIGNED_SEQ_NO, + 17, 0, Result.CREATED) {}; assertEquals("/index/type/a+b", response.getLocation(null)); @@ -86,6 +89,7 @@ public void testToXContentDoesntIncludeForcedRefreshUnlessForced() throws IOExce "type", "id", SequenceNumbersService.UNASSIGNED_SEQ_NO, + 17, 0, Result.CREATED) { // DocWriteResponse is abstract so we have to sneak a subclass in here to test it. 
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java index b6242e6d5fcd4..e7bd34e76ef3a 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java @@ -117,7 +117,7 @@ public void onFailure(Exception e) { for (DocWriteRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), - indexRequest.id(), 1, 1, true); + indexRequest.id(), 1, 17, 1, true); originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); } bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[originalResponses.size()]), 0)); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 355b3978cbf46..a4bf0d77a1c26 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -93,7 +93,7 @@ public void testShouldExecuteReplicaItem() throws Exception { // Successful index request should be replicated DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); - DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 1, randomBoolean()); + DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean()); BulkItemRequest request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, response)); 
assertTrue(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); @@ -101,7 +101,7 @@ public void testShouldExecuteReplicaItem() throws Exception { // Failed index requests should not be replicated (for now!) writeRequest = new IndexRequest("index", "type", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); - response = new IndexResponse(shardId, "type", "id", 1, 1, randomBoolean()); + response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean()); request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse( new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, @@ -466,8 +466,8 @@ public void testUpdateReplicaRequestWithSuccess() throws Exception { boolean created = randomBoolean(); Translog.Location resultLocation = new Translog.Location(42, 42, 42); - Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation); - DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 1, created); + Engine.IndexResult indexResult = new FakeResult(1, 1, 17, created, resultLocation); + DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, created); BulkItemResultHolder goodResults = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest); @@ -505,10 +505,12 @@ public void testCalculateTranslogLocation() throws Exception { equalTo(original)); boolean created = randomBoolean(); - DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 1, created); + DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, created); Translog.Location newLocation = new Translog.Location(1, 1, 1); - Engine.IndexResult indexResult = new IndexResultWithLocation(randomNonNegativeLong(), - randomNonNegativeLong(), created, newLocation); + final long version = randomNonNegativeLong(); + final long seqNo = randomNonNegativeLong(); + final long primaryTerm = randomIntBetween(1, 16); + Engine.IndexResult indexResult = new 
IndexResultWithLocation(version, seqNo, primaryTerm, created, newLocation); results = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest); assertThat(TransportShardBulkAction.calculateTranslogLocation(original, results), equalTo(newLocation)); @@ -581,9 +583,8 @@ public void verifyMappings(Engine.Index operation, public class IndexResultWithLocation extends Engine.IndexResult { private final Translog.Location location; - public IndexResultWithLocation(long version, long seqNo, boolean created, - Translog.Location newLocation) { - super(version, seqNo, created); + public IndexResultWithLocation(long version, long seqNo, long primaryTerm, boolean created, Translog.Location newLocation) { + super(version, seqNo, primaryTerm, created); this.location = newLocation; } @@ -597,8 +598,7 @@ public void testPrepareIndexOpOnReplica() throws Exception { IndexMetaData metaData = indexMetaData(); IndexShard shard = newStartedShard(false); - DocWriteResponse primaryResponse = new IndexResponse(shardId, "index", "id", - 1, 1, randomBoolean()); + DocWriteResponse primaryResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, randomBoolean()); IndexRequest request = new IndexRequest("index", "type", "id") .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); @@ -619,9 +619,8 @@ private static class FakeResult extends Engine.IndexResult { private final Translog.Location location; - protected FakeResult(long version, long seqNo, boolean created, - Translog.Location location) { - super(version, seqNo, created); + protected FakeResult(long version, long seqNo, long primaryTerm, boolean created, Translog.Location location) { + super(version, seqNo, primaryTerm, created); this.location = location; } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java index 5786482e79e00..fa42573e439ee 100644 --- 
a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java @@ -290,10 +290,11 @@ public void testBulkResponseSetsLotsOfStatus() { default: throw new RuntimeException("Bad scenario"); } - responses[i] = new BulkItemResponse( - i, - opType, - new IndexResponse(shardId, "type", "id" + i, randomInt(20), randomInt(), createdResponse)); + final int seqNo = randomInt(20); + final int primaryTerm = randomIntBetween(1, 16); + final IndexResponse response = + new IndexResponse(shardId, "type", "id" + i, seqNo, primaryTerm, randomInt(), createdResponse); + responses[i] = new BulkItemResponse(i, opType, response); } new DummyAsyncBulkByScrollAction().onBulkResponse(timeValueNanos(System.nanoTime()), new BulkResponse(responses, 0)); assertEquals(versionConflicts, testTask.getStatus().getVersionConflicts()); @@ -799,6 +800,7 @@ RequestBuilder extends ActionRequestBuilder> index.type(), index.id(), randomInt(20), + randomIntBetween(1, 16), randomIntBetween(0, Integer.MAX_VALUE), true); } else if (item instanceof UpdateRequest) { @@ -813,6 +815,7 @@ RequestBuilder extends ActionRequestBuilder> delete.type(), delete.id(), randomInt(20), + randomIntBetween(1, 16), randomIntBetween(0, Integer.MAX_VALUE), true); } else { diff --git a/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java b/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java index 2b13b2b8b4e8f..95fbbe8ed1466 100644 --- a/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java @@ -40,13 +40,13 @@ public class DeleteResponseTests extends ESTestCase { public void testToXContent() { { - DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "type", "id", 3, 5, true); + DeleteResponse response = new DeleteResponse(new 
ShardId("index", "index_uuid", 0), "type", "id", 3, 17, 5, true); String output = Strings.toString(response); assertEquals("{\"found\":true,\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":5,\"result\":\"deleted\"," + - "\"_shards\":null,\"_seq_no\":3}", output); + "\"_shards\":null,\"_seq_no\":3,\"_primary_term\":17}", output); } { - DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "type", "id", -1, 7, true); + DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "type", "id", -1, 0, 7, true); response.setForcedRefresh(true); response.setShardInfo(new ReplicationResponse.ShardInfo(10, 5)); String output = Strings.toString(response); @@ -89,17 +89,19 @@ public static Tuple randomDeleteResponse() { String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); long seqNo = randomFrom(SequenceNumbersService.UNASSIGNED_SEQ_NO, randomNonNegativeLong(), (long) randomIntBetween(0, 10000)); + long primaryTerm = seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 10000); long version = randomBoolean() ? 
randomNonNegativeLong() : randomIntBetween(0, 10000); boolean found = randomBoolean(); boolean forcedRefresh = randomBoolean(); Tuple shardInfos = RandomObjects.randomShardInfo(random()); - DeleteResponse actual = new DeleteResponse(new ShardId(index, indexUUid, shardId), type, id, seqNo, version, found); + DeleteResponse actual = new DeleteResponse(new ShardId(index, indexUUid, shardId), type, id, seqNo, primaryTerm, version, found); actual.setForcedRefresh(forcedRefresh); actual.setShardInfo(shardInfos.v1()); - DeleteResponse expected = new DeleteResponse(new ShardId(index, INDEX_UUID_NA_VALUE, -1), type, id, seqNo, version, found); + DeleteResponse expected = + new DeleteResponse(new ShardId(index, INDEX_UUID_NA_VALUE, -1), type, id, seqNo, primaryTerm, version, found); expected.setForcedRefresh(forcedRefresh); expected.setShardInfo(shardInfos.v2()); diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index b4836496f885f..4fb1d0c648ea2 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -135,7 +135,7 @@ public void testIndexResponse() { String id = randomAlphaOfLengthBetween(3, 10); long version = randomLong(); boolean created = randomBoolean(); - IndexResponse indexResponse = new IndexResponse(shardId, type, id, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, created); + IndexResponse indexResponse = new IndexResponse(shardId, type, id, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, version, created); int total = randomIntBetween(1, 10); int successful = randomIntBetween(1, 10); ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(total, successful); @@ -156,6 +156,7 @@ public void testIndexResponse() { assertEquals("IndexResponse[index=" + shardId.getIndexName() + ",type=" + type + ",id="+ id + ",version=" + version 
+ ",result=" + (created ? "created" : "updated") + ",seqNo=" + SequenceNumbersService.UNASSIGNED_SEQ_NO + + ",primaryTerm=" + 0 + ",shards={\"total\":" + total + ",\"successful\":" + successful + ",\"failed\":0}]", indexResponse.toString()); } diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java index c222d2d8964da..58947a7173e3d 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java @@ -41,13 +41,13 @@ public class IndexResponseTests extends ESTestCase { public void testToXContent() { { - IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", 3, 5, true); + IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", 3, 17, 5, true); String output = Strings.toString(indexResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":5,\"result\":\"created\",\"_shards\":null," + - "\"_seq_no\":3,\"created\":true}", output); + "\"_seq_no\":3,\"_primary_term\":17,\"created\":true}", output); } { - IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", -1, 7, true); + IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", -1, 17, 7, true); indexResponse.setForcedRefresh(true); indexResponse.setShardInfo(new ReplicationResponse.ShardInfo(10, 5)); String output = Strings.toString(indexResponse); @@ -102,17 +102,19 @@ public static Tuple randomIndexResponse() { String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); long seqNo = randomFrom(SequenceNumbersService.UNASSIGNED_SEQ_NO, randomNonNegativeLong(), (long) randomIntBetween(0, 10000)); + long primaryTerm = seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO ? 
0 : randomIntBetween(1, 10000); long version = randomBoolean() ? randomNonNegativeLong() : randomIntBetween(0, 10000); boolean created = randomBoolean(); boolean forcedRefresh = randomBoolean(); Tuple shardInfos = RandomObjects.randomShardInfo(random()); - IndexResponse actual = new IndexResponse(new ShardId(index, indexUUid, shardId), type, id, seqNo, version, created); + IndexResponse actual = new IndexResponse(new ShardId(index, indexUUid, shardId), type, id, seqNo, primaryTerm, version, created); actual.setForcedRefresh(forcedRefresh); actual.setShardInfo(shardInfos.v1()); - IndexResponse expected = new IndexResponse(new ShardId(index, INDEX_UUID_NA_VALUE, -1), type, id, seqNo, version, created); + IndexResponse expected = + new IndexResponse(new ShardId(index, INDEX_UUID_NA_VALUE, -1), type, id, seqNo, primaryTerm, version, created); expected.setForcedRefresh(forcedRefresh); expected.setShardInfo(shardInfos.v2()); diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java index 153c27b370363..1c80ddca1c533 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java @@ -57,10 +57,10 @@ public void testToXContent() throws IOException { } { UpdateResponse updateResponse = new UpdateResponse(new ReplicationResponse.ShardInfo(10, 6), - new ShardId("index", "index_uuid", 1), "type", "id", 3, 1, DELETED); + new ShardId("index", "index_uuid", 1), "type", "id", 3, 17, 1, DELETED); String output = Strings.toString(updateResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"result\":\"deleted\"," + - "\"_shards\":{\"total\":10,\"successful\":6,\"failed\":0},\"_seq_no\":3}", output); + "\"_shards\":{\"total\":10,\"successful\":6,\"failed\":0},\"_seq_no\":3,\"_primary_term\":17}", output); } { BytesReference source 
= new BytesArray("{\"title\":\"Book title\",\"isbn\":\"ABC-123\"}"); @@ -69,12 +69,12 @@ public void testToXContent() throws IOException { fields.put("isbn", new GetField("isbn", Collections.singletonList("ABC-123"))); UpdateResponse updateResponse = new UpdateResponse(new ReplicationResponse.ShardInfo(3, 2), - new ShardId("books", "books_uuid", 2), "book", "1", 7, 2, UPDATED); + new ShardId("books", "books_uuid", 2), "book", "1", 7, 17, 2, UPDATED); updateResponse.setGetResult(new GetResult("books", "book", "1", 2, true, source, fields)); String output = Strings.toString(updateResponse); assertEquals("{\"_index\":\"books\",\"_type\":\"book\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\"," + - "\"_shards\":{\"total\":3,\"successful\":2,\"failed\":0},\"_seq_no\":7,\"get\":{\"found\":true," + + "\"_shards\":{\"total\":3,\"successful\":2,\"failed\":0},\"_seq_no\":7,\"_primary_term\":17,\"get\":{\"found\":true," + "\"_source\":{\"title\":\"Book title\",\"isbn\":\"ABC-123\"},\"fields\":{\"isbn\":[\"ABC-123\"],\"title\":[\"Book " + "title\"]}}}", output); } @@ -128,6 +128,7 @@ public static Tuple randomUpdateResponse(XConten // We also want small number values (randomNonNegativeLong() tend to generate high numbers) // in order to catch some conversion error that happen between int/long after parsing. Long seqNo = randomFrom(randomNonNegativeLong(), (long) randomIntBetween(0, 10_000), null); + long primaryTerm = seqNo == null ? 
0 : randomIntBetween(1, 16); ShardId actualShardId = new ShardId(index, indexUUid, shardId); ShardId expectedShardId = new ShardId(index, INDEX_UUID_NA_VALUE, -1); @@ -136,8 +137,8 @@ public static Tuple randomUpdateResponse(XConten if (seqNo != null) { Tuple shardInfos = RandomObjects.randomShardInfo(random()); - actual = new UpdateResponse(shardInfos.v1(), actualShardId, type, id, seqNo, version, result); - expected = new UpdateResponse(shardInfos.v2(), expectedShardId, type, id, seqNo, version, result); + actual = new UpdateResponse(shardInfos.v1(), actualShardId, type, id, seqNo, primaryTerm, version, result); + expected = new UpdateResponse(shardInfos.v2(), expectedShardId, type, id, seqNo, primaryTerm, version, result); } else { actual = new UpdateResponse(actualShardId, type, id, version, result); expected = new UpdateResponse(expectedShardId, type, id, version, result); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index c35f72d208533..75d3e1a52ada5 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -569,6 +569,7 @@ protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) request.type(), request.id(), indexResult.getSeqNo(), + indexResult.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index 88d8a075e1b3e..b8a6c9c12a64f 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -137,7 +137,7 @@ 
public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { ParsedDocument doc = InternalEngineTests.createParsedDoc("1", "test", null); Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid())); Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc); - compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true)); + compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); assertEquals(0, postIndexException.get()); @@ -161,7 +161,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(2, postDelete.get()); assertEquals(2, postDeleteException.get()); - compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, false)); + compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, false)); assertEquals(0, preIndex.get()); assertEquals(2, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 1e2d81705df02..f84d41528f393 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2055,6 +2055,7 @@ public void testTranslogOpSerialization() throws Exception { SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers"; long randomSeqNum = randomBoolean() ? 
SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong(); + long primaryTerm = randomSeqNum == SequenceNumbersService.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 16); long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong(); seqID.seqNo.setLongValue(randomSeqNum); seqID.seqNoDocValue.setLongValue(randomSeqNum); @@ -2073,7 +2074,7 @@ public void testTranslogOpSerialization() throws Exception { Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm, 1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false); - Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true); + Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, primaryTerm, true); Translog.Index index = new Translog.Index(eIndex, eIndexResult); BytesStreamOutput out = new BytesStreamOutput(); @@ -2084,7 +2085,7 @@ public void testTranslogOpSerialization() throws Exception { Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm, 2, VersionType.INTERNAL, Origin.PRIMARY, 0); - Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true); + Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, primaryTerm, true); Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult); out = new BytesStreamOutput(); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 40a92b11e7372..d96350eec388b 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -170,12 +170,12 @@ public void testSendSnapshotSendsOps() throws IOException { final int initialNumberOfDocs = randomIntBetween(16, 64); for (int i = 0; i < initialNumberOfDocs; i++) { final 
Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true))); } final int numberOfDocsWithValidSequenceNumbers = randomIntBetween(16, 64); for (int i = initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, 1, true))); } operations.add(null); int totalOperations = handler.sendSnapshot(startingSeqNo, new Translog.Snapshot() { diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index d25641cacbd9e..e12d6ab4b255b 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -104,7 +104,8 @@ The result of this bulk operation is: }, "created": true, "status": 201, - "_seq_no" : 0 + "_seq_no" : 0, + "_primary_term": 1 } }, { @@ -121,7 +122,8 @@ The result of this bulk operation is: "failed": 0 }, "status": 404, - "_seq_no" : 1 + "_seq_no" : 1, + "_primary_term" : 2 } }, { @@ -138,7 +140,8 @@ The result of this bulk operation is: }, "created": true, "status": 201, - "_seq_no" : 2 + "_seq_no" : 2, + "_primary_term" : 3 } }, { @@ -154,13 +157,14 @@ The result of this bulk operation is: "failed": 0 }, "status": 200, - "_seq_no" : 3 + "_seq_no" : 3, + "_primary_term" : 4 } } ] } -------------------------------------------------- -// TESTRESPONSE[s/"took": 30/"took": $body.took/ s/"index_uuid": .../"index_uuid": $body.items.3.update.error.index_uuid/ s/"_seq_no" : 0/"_seq_no" : $body.items.0.index._seq_no/ s/"_seq_no" : 1/"_seq_no" : $body.items.1.delete._seq_no/ s/"_seq_no" : 2/"_seq_no" : 
$body.items.2.create._seq_no/ s/"_seq_no" : 3/"_seq_no" : $body.items.3.update._seq_no/] +// TESTRESPONSE[s/"took": 30/"took": $body.took/ s/"index_uuid": .../"index_uuid": $body.items.3.update.error.index_uuid/ s/"_seq_no" : 0/"_seq_no" : $body.items.0.index._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body.items.0.index._primary_term/ s/"_seq_no" : 1/"_seq_no" : $body.items.1.delete._seq_no/ s/"_primary_term" : 2/"_primary_term" : $body.items.1.delete._primary_term/ s/"_seq_no" : 2/"_seq_no" : $body.items.2.create._seq_no/ s/"_primary_term" : 3/"_primary_term" : $body.items.2.create._primary_term/ s/"_seq_no" : 3/"_seq_no" : $body.items.3.update._seq_no/ s/"_primary_term" : 4/"_primary_term" : $body.items.3.update._primary_term/] The endpoints are `/_bulk`, `/{index}/_bulk`, and `{index}/{type}/_bulk`. When the index or the index/type are provided, they will be used by diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index fb904e121756d..2af9cf0a0c529 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -32,6 +32,7 @@ The result of the above index operation is: "_version" : 1, "created" : true, "_seq_no" : 0, + "_primary_term" : 1, "result" : created } -------------------------------------------------- @@ -230,6 +231,7 @@ The result of the above index operation is: "_version" : 1, "created" : true, "_seq_no" : 0, + "_primary_term" : 1, "result": "created" } -------------------------------------------------- diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 87d20b35221c7..608a462e1f3ea 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -323,10 +323,11 @@ And the response: "failed" : 0 }, "created" : true, - "_seq_no" : 0 + "_seq_no" : 0, + "_primary_term" : 1 } -------------------------------------------------- -// TESTRESPONSE[s/"_seq_no" : 0/"_seq_no" : $body._seq_no/] +// 
TESTRESPONSE[s/"_seq_no" : 0/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] From the above, we can see that a new customer document was successfully created inside the customer index and the external type. The document also has an internal id of 1 which we specified at index time. diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 970863041a9d1..9f919a2802ded 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -906,7 +906,8 @@ PUT /myindex/type/1?pipeline=monthlyindex "failed" : 0 }, "created" : true, - "_seq_no" : 0 + "_seq_no" : 0, + "_primary_term" : 1 } -------------------------------------------------- // TESTRESPONSE diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 0e4ce7a8d4e76..1d43bff06a136 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -182,7 +182,8 @@ Index response: }, "created": true, "result": "created", - "_seq_no" : 1 + "_seq_no" : 1, + "_primary_term" : 1 } -------------------------------------------------- // TESTRESPONSE From d0b6c285eef5c6e70c5595df55f40bbc0aacb077 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 00:02:50 -0400 Subject: [PATCH 02/17] Fix whitespace --- .../main/java/org/elasticsearch/action/DocWriteResponse.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 720ac9c8d5bcf..aacb0ff17e744 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -351,7 +351,7 @@ protected static void parseInnerToXContent(XContentParser parser, Builder contex 
context.setSeqNo(parser.longValue()); } else if (_PRIMARY_TERM.equals(currentFieldName)) { context.setPrimaryTerm(parser.longValue()); - } else{ + } else { throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { From 4c6b35cfb9c9db9de962013cf036f8e6f9056a1c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 00:08:38 -0400 Subject: [PATCH 03/17] Break crazy docs assertion into multiple lines --- docs/reference/docs/bulk.asciidoc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index e12d6ab4b255b..0c2d5fd447312 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -164,7 +164,16 @@ The result of this bulk operation is: ] } -------------------------------------------------- -// TESTRESPONSE[s/"took": 30/"took": $body.took/ s/"index_uuid": .../"index_uuid": $body.items.3.update.error.index_uuid/ s/"_seq_no" : 0/"_seq_no" : $body.items.0.index._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body.items.0.index._primary_term/ s/"_seq_no" : 1/"_seq_no" : $body.items.1.delete._seq_no/ s/"_primary_term" : 2/"_primary_term" : $body.items.1.delete._primary_term/ s/"_seq_no" : 2/"_seq_no" : $body.items.2.create._seq_no/ s/"_primary_term" : 3/"_primary_term" : $body.items.2.create._primary_term/ s/"_seq_no" : 3/"_seq_no" : $body.items.3.update._seq_no/ s/"_primary_term" : 4/"_primary_term" : $body.items.3.update._primary_term/] +// TESTRESPONSE[s/"took": 30/"took": $body.took/] +// TESTRESPONSE[s/"index_uuid": .../"index_uuid": $body.items.3.update.error.index_uuid/] +// TESTRESPONSE[s/"_seq_no" : 0/"_seq_no" : $body.items.0.index._seq_no/] +// TESTRESPONSE[s/"_primary_term" : 1/"_primary_term" : $body.items.0.index._primary_term/] +// TESTRESPONSE[s/"_seq_no" : 1/"_seq_no" : $body.items.1.delete._seq_no/] +// TESTRESPONSE[s/"_primary_term" : 2/"_primary_term" : 
$body.items.1.delete._primary_term/] +// TESTRESPONSE[s/"_seq_no" : 2/"_seq_no" : $body.items.2.create._seq_no/] +// TESTRESPONSE[s/"_primary_term" : 3/"_primary_term" : $body.items.2.create._primary_term/] +// TESTRESPONSE[s/"_seq_no" : 3/"_seq_no" : $body.items.3.update._seq_no/] +// TESTRESPONSE[s/"_primary_term" : 4/"_primary_term" : $body.items.3.update._primary_term/] The endpoints are `/_bulk`, `/{index}/_bulk`, and `{index}/{type}/_bulk`. When the index or the index/type are provided, they will be used by From d4bbfcde9713a234d2e82d6ac5dba97ab696adf9 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 10:35:14 -0400 Subject: [PATCH 04/17] Remove primary term from result --- .../action/bulk/TransportShardBulkAction.java | 8 ++-- .../elasticsearch/index/engine/Engine.java | 43 +++++++------------ .../index/engine/InternalEngine.java | 12 +++--- .../ESIndexLevelReplicationTestCase.java | 2 +- .../shard/IndexingOperationListenerTests.java | 2 +- .../index/translog/TranslogTests.java | 2 +- 6 files changed, 29 insertions(+), 40 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index d82bc1caab288..e6f9516925878 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -139,7 +139,7 @@ private static BulkItemResultHolder executeIndexRequest(final IndexRequest index return new BulkItemResultHolder(null, indexResult, bulkItemRequest); } else { IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), - indexResult.getSeqNo(), indexResult.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); + indexResult.getSeqNo(), primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); return new BulkItemResultHolder(response, 
indexResult, bulkItemRequest); } } @@ -152,7 +152,7 @@ private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest del return new BulkItemResultHolder(null, deleteResult, bulkItemRequest); } else { DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(), - deleteResult.getSeqNo(), deleteResult.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); + deleteResult.getSeqNo(), primary.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); return new BulkItemResultHolder(response, deleteResult, bulkItemRequest); } } @@ -317,7 +317,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq assert result instanceof Engine.IndexResult : result.getClass(); IndexRequest updateIndexRequest = translate.action(); final IndexResponse indexResponse = - new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), + new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated()); BytesReference indexSourceAsBytes = updateIndexRequest.source(); updateResponse = new UpdateResponse( @@ -343,7 +343,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq assert result instanceof Engine.DeleteResult : result.getClass(); DeleteRequest updateDeleteRequest = translate.action(); DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), - updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), + updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound()); updateResponse = new UpdateResponse( deleteResponse.getShardInfo(), diff --git 
a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 342fd0d3dc5d8..31ba05817b756 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -307,22 +307,20 @@ public abstract static class Result { private final Operation.TYPE operationType; private final long version; private final long seqNo; - private final long primaryTerm; private final Exception failure; private final SetOnce freeze = new SetOnce<>(); private Translog.Location translogLocation; private long took; - protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo, long primaryTerm) { + protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) { this.operationType = operationType; this.failure = failure; this.version = version; this.seqNo = seqNo; - this.primaryTerm = primaryTerm; } - protected Result(Operation.TYPE operationType, long version, long seqNo, long primaryTerm) { - this(operationType, null, version, seqNo, primaryTerm); + protected Result(Operation.TYPE operationType, long version, long seqNo) { + this(operationType, null, version, seqNo); } /** whether the operation had failure */ @@ -344,15 +342,6 @@ public long getSeqNo() { return seqNo; } - /** - * Get the primary term. 
- * - * @return the primary term - */ - public long getPrimaryTerm() { - return primaryTerm; - } - /** get the translog location after executing the operation */ public Translog.Location getTranslogLocation() { return translogLocation; @@ -400,7 +389,7 @@ public static class IndexResult extends Result { private final boolean created; public IndexResult(long version, long seqNo, long primaryTerm, boolean created) { - super(Operation.TYPE.INDEX, version, seqNo, primaryTerm); + super(Operation.TYPE.INDEX, version, seqNo); this.created = created; } @@ -408,12 +397,12 @@ public IndexResult(long version, long seqNo, long primaryTerm, boolean created) * use in case of index operation failed before getting to internal engine * (e.g while preparing operation or updating mappings) * */ - public IndexResult(Exception failure, long version, long primaryTerm) { - this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm); + public IndexResult(Exception failure, long version) { + this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO); } - public IndexResult(Exception failure, long version, long seqNo, long primaryTerm) { - super(Operation.TYPE.INDEX, failure, version, seqNo, primaryTerm); + public IndexResult(Exception failure, long version, long seqNo) { + super(Operation.TYPE.INDEX, failure, version, seqNo); this.created = false; } @@ -427,13 +416,13 @@ public static class DeleteResult extends Result { private final boolean found; - public DeleteResult(long version, long seqNo, long primaryTerm, boolean found) { - super(Operation.TYPE.DELETE, version, seqNo, primaryTerm); + public DeleteResult(long version, long seqNo, boolean found) { + super(Operation.TYPE.DELETE, version, seqNo); this.found = found; } - public DeleteResult(Exception failure, long version, long seqNo, long primaryTerm, boolean found) { - super(Operation.TYPE.DELETE, failure, version, seqNo, primaryTerm); + public DeleteResult(Exception failure, long version, long seqNo, boolean 
found) { + super(Operation.TYPE.DELETE, failure, version, seqNo); this.found = found; } @@ -445,12 +434,12 @@ public boolean isFound() { static class NoOpResult extends Result { - NoOpResult(long seqNo, long primaryTerm) { - super(Operation.TYPE.NO_OP, 0, seqNo, primaryTerm); + NoOpResult(long seqNo) { + super(Operation.TYPE.NO_OP, 0, seqNo); } - NoOpResult(long seqNo, long primaryTerm, Exception failure) { - super(Operation.TYPE.NO_OP, failure, 0, seqNo, primaryTerm); + NoOpResult(long seqNo, Exception failure) { + super(Operation.TYPE.NO_OP, failure, 0, seqNo); } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 77f6e3dd98300..107430b0a7405 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -896,7 +896,7 @@ public DeleteResult delete(Delete delete) throws IOException { deleteResult = deleteInLucene(delete, plan); } else { deleteResult = new DeleteResult( - plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); + plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } if (!deleteResult.hasFailure() && delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { @@ -990,12 +990,12 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( - plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); + plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { // there is no tragic event and such it must be a document level failure return new DeleteResult( - 
ex, plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); + ex, plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } else { throw ex; } @@ -1028,7 +1028,7 @@ private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, static DeletionStrategy skipDueToVersionConflict( VersionConflictEngineException e, long currentVersion, long primaryTerm, boolean currentlyDeleted) { final DeleteResult deleteResult = - new DeleteResult(e, currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, currentlyDeleted == false); + new DeleteResult(e, currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, currentlyDeleted == false); return new DeletionStrategy( false, currentlyDeleted, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, deleteResult); } @@ -1057,7 +1057,7 @@ public NoOpResult noOp(final NoOp noOp) { try (ReleasableLock ignored = readLock.acquire()) { noOpResult = innerNoOp(noOp); } catch (final Exception e) { - noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm(), e); + noOpResult = new NoOpResult(noOp.seqNo(), e); } return noOpResult; } @@ -1066,7 +1066,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { assert noOp.seqNo() > SequenceNumbersService.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try { - final NoOpResult noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm()); + final NoOpResult noOpResult = new NoOpResult(noOp.seqNo()); final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); noOpResult.setTook(System.nanoTime() - noOp.startTime()); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 75d3e1a52ada5..2996362735f2d 100644 --- 
a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -569,7 +569,7 @@ protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) request.type(), request.id(), indexResult.getSeqNo(), - indexResult.getPrimaryTerm(), + primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index b8a6c9c12a64f..0be902cae1b3d 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -137,7 +137,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { ParsedDocument doc = InternalEngineTests.createParsedDoc("1", "test", null); Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid())); Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc); - compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true)); + compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index f84d41528f393..2b8f7515ae7a6 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2085,7 +2085,7 @@ public void testTranslogOpSerialization() 
throws Exception { Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm, 2, VersionType.INTERNAL, Origin.PRIMARY, 0); - Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, primaryTerm, true); + Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true); Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult); out = new BytesStreamOutput(); From 64772ae0b301a7dcfbeff303fc5f685292c80dc9 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 10:46:14 -0400 Subject: [PATCH 05/17] Remove one more usage --- core/src/main/java/org/elasticsearch/index/engine/Engine.java | 2 +- .../java/org/elasticsearch/index/engine/InternalEngine.java | 4 ++-- .../action/bulk/TransportShardBulkActionTests.java | 4 ++-- .../index/shard/IndexingOperationListenerTests.java | 2 +- .../java/org/elasticsearch/index/translog/TranslogTests.java | 2 +- .../indices/recovery/RecoverySourceHandlerTests.java | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index d9762afd22c91..122587949e319 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -387,7 +387,7 @@ public static class IndexResult extends Result { private final boolean created; - public IndexResult(long version, long seqNo, long primaryTerm, boolean created) { + public IndexResult(long version, long seqNo, boolean created) { super(Operation.TYPE.INDEX, version, seqNo); this.created = created; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5e47331a78a42..bc08ba474956b 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ 
b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -612,7 +612,7 @@ public IndexResult index(Index index) throws IOException { indexResult = indexIntoLucene(index, plan); } else { indexResult = new IndexResult( - plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); + plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location location; @@ -738,7 +738,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } versionMap.putUnderLock(index.uid().bytes(), new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); - return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); + return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { /* There is no tragic event recorded so this must be a document failure. 
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index a444f0d6c6cd1..1459d32fe3351 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -617,7 +617,7 @@ public void verifyMappings(Engine.Index operation, public class IndexResultWithLocation extends Engine.IndexResult { private final Translog.Location location; public IndexResultWithLocation(long version, long seqNo, long primaryTerm, boolean created, Translog.Location newLocation) { - super(version, seqNo, primaryTerm, created); + super(version, seqNo, created); this.location = newLocation; } @@ -653,7 +653,7 @@ private static class FakeResult extends Engine.IndexResult { private final Translog.Location location; protected FakeResult(long version, long seqNo, long primaryTerm, boolean created, Translog.Location location) { - super(version, seqNo, primaryTerm, created); + super(version, seqNo, created); this.location = location; } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index 0be902cae1b3d..88d8a075e1b3e 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -161,7 +161,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(2, postDelete.get()); assertEquals(2, postDeleteException.get()); - compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, false)); + compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, 
SequenceNumbersService.UNASSIGNED_SEQ_NO, false)); assertEquals(0, preIndex.get()); assertEquals(2, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index eb7985002a324..dfb8efb9ab943 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2074,7 +2074,7 @@ public void testTranslogOpSerialization() throws Exception { Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm, 1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false); - Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, primaryTerm, true); + Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true); Translog.Index index = new Translog.Index(eIndex, eIndexResult); BytesStreamOutput out = new BytesStreamOutput(); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index d96350eec388b..40a92b11e7372 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -170,12 +170,12 @@ public void testSendSnapshotSendsOps() throws IOException { final int initialNumberOfDocs = randomIntBetween(16, 64); for (int i = 0; i < initialNumberOfDocs; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true))); } final int numberOfDocsWithValidSequenceNumbers = 
randomIntBetween(16, 64); for (int i = initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, 1, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true))); } operations.add(null); int totalOperations = handler.sendSnapshot(startingSeqNo, new Translog.Snapshot() { From cf38f7492fc5324c69020b416cbc6f37f8b4b07a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 11:58:58 -0400 Subject: [PATCH 06/17] Revert "Remove one more usage" This reverts commit 64772ae0b301a7dcfbeff303fc5f685292c80dc9. --- core/src/main/java/org/elasticsearch/index/engine/Engine.java | 2 +- .../java/org/elasticsearch/index/engine/InternalEngine.java | 4 ++-- .../action/bulk/TransportShardBulkActionTests.java | 4 ++-- .../index/shard/IndexingOperationListenerTests.java | 2 +- .../java/org/elasticsearch/index/translog/TranslogTests.java | 2 +- .../indices/recovery/RecoverySourceHandlerTests.java | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 122587949e319..d9762afd22c91 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -387,7 +387,7 @@ public static class IndexResult extends Result { private final boolean created; - public IndexResult(long version, long seqNo, boolean created) { + public IndexResult(long version, long seqNo, long primaryTerm, boolean created) { super(Operation.TYPE.INDEX, version, seqNo); this.created = created; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 
bc08ba474956b..5e47331a78a42 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -612,7 +612,7 @@ public IndexResult index(Index index) throws IOException { indexResult = indexIntoLucene(index, plan); } else { indexResult = new IndexResult( - plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); + plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); } if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location location; @@ -738,7 +738,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } versionMap.putUnderLock(index.uid().bytes(), new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); - return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); + return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { /* There is no tragic event recorded so this must be a document failure. 
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 1459d32fe3351..a444f0d6c6cd1 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -617,7 +617,7 @@ public void verifyMappings(Engine.Index operation, public class IndexResultWithLocation extends Engine.IndexResult { private final Translog.Location location; public IndexResultWithLocation(long version, long seqNo, long primaryTerm, boolean created, Translog.Location newLocation) { - super(version, seqNo, created); + super(version, seqNo, primaryTerm, created); this.location = newLocation; } @@ -653,7 +653,7 @@ private static class FakeResult extends Engine.IndexResult { private final Translog.Location location; protected FakeResult(long version, long seqNo, long primaryTerm, boolean created, Translog.Location location) { - super(version, seqNo, created); + super(version, seqNo, primaryTerm, created); this.location = location; } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index 88d8a075e1b3e..0be902cae1b3d 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -161,7 +161,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(2, postDelete.get()); assertEquals(2, postDeleteException.get()); - compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, false)); + compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, 
0, false)); assertEquals(0, preIndex.get()); assertEquals(2, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index dfb8efb9ab943..eb7985002a324 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2074,7 +2074,7 @@ public void testTranslogOpSerialization() throws Exception { Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm, 1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false); - Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true); + Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, primaryTerm, true); Translog.Index index = new Translog.Index(eIndex, eIndexResult); BytesStreamOutput out = new BytesStreamOutput(); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 40a92b11e7372..d96350eec388b 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -170,12 +170,12 @@ public void testSendSnapshotSendsOps() throws IOException { final int initialNumberOfDocs = randomIntBetween(16, 64); for (int i = 0; i < initialNumberOfDocs; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true))); } final int numberOfDocsWithValidSequenceNumbers = randomIntBetween(16, 64); for (int i = 
initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, 1, true))); } operations.add(null); int totalOperations = handler.sendSnapshot(startingSeqNo, new Translog.Snapshot() { From 6c17c2ce07330262a573435f227865fa7524d0ea Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 11:59:07 -0400 Subject: [PATCH 07/17] Revert "Merge branch 'master' into doc-write-response-primary-term" This reverts commit b09e1238f942be2a14855eb639f4cf83f83a6f7b, reversing changes made to d4bbfcde9713a234d2e82d6ac5dba97ab696adf9. --- TESTING.asciidoc | 3 +- Vagrantfile | 4 + .../gradle/vagrant/VagrantTestPlugin.groovy | 1 + .../segments/IndicesSegmentResponse.java | 27 -- .../admin/indices/shrink/ShrinkRequest.java | 3 - .../action/bulk/BulkItemRequest.java | 9 +- .../action/bulk/BulkItemResponse.java | 46 +--- .../action/bulk/TransportShardBulkAction.java | 189 ++++---------- .../replication/TransportWriteAction.java | 4 +- .../metadata/MetaDataCreateIndexService.java | 18 +- .../common/settings/IndexScopedSettings.java | 6 +- .../discovery/zen/ElectMasterService.java | 3 +- .../org/elasticsearch/index/IndexService.java | 29 +- .../elasticsearch/index/IndexSettings.java | 17 +- .../elasticsearch/index/IndexSortConfig.java | 247 ------------------ .../elasticsearch/index/engine/Engine.java | 31 +-- .../index/engine/EngineConfig.java | 13 +- .../index/engine/InternalEngine.java | 33 +-- .../elasticsearch/index/engine/Segment.java | 121 +-------- .../index/mapper/MapperService.java | 8 - .../elasticsearch/index/shard/IndexShard.java | 53 ++-- .../index/shard/StoreRecovery.java | 27 +- .../shard/TranslogRecoveryPerformer.java | 4 +- .../bucketmetrics/BucketMetricValue.java | 27 -- 
.../InternalBucketMetricValue.java | 3 +- .../admin/indices/create/ShrinkIndexIT.java | 82 ------ .../bulk/TransportShardBulkActionTests.java | 65 ++--- .../org/elasticsearch/index/IndexSortIT.java | 131 ---------- .../index/IndexSortSettingsTests.java | 160 ------------ .../index/engine/InternalEngineTests.java | 78 +----- .../index/engine/SegmentTests.java | 114 -------- .../index/mapper/MapperServiceTests.java | 22 -- .../ESIndexLevelReplicationTestCase.java | 114 ++++---- .../IndexLevelReplicationTests.java | 145 +--------- .../RecoveryDuringReplicationTests.java | 6 +- .../index/shard/IndexShardIT.java | 2 +- .../index/shard/RefreshListenersTests.java | 2 +- .../index/shard/StoreRecoveryTests.java | 34 +-- .../index/translog/TranslogTests.java | 14 +- .../indices/cluster/ClusterStateChanges.java | 1 - .../InternalPercentilesTestCase.java | 61 ----- .../hdr/InternalHDRPercentilesTests.java | 60 ----- .../InternalTDigestPercentilesTests.java | 30 ++- docs/plugins/discovery-azure-classic.asciidoc | 2 +- .../index-modules/index-sorting.asciidoc | 107 -------- docs/reference/ingest/ingest-node.asciidoc | 61 +---- .../mapping/fields/field-names-field.asciidoc | 4 +- docs/reference/setup/install/docker.asciidoc | 5 +- .../painless/CompilerSettings.java | 3 +- .../painless/ArrayLikeObjectTestCase.java | 4 +- .../painless/BasicExpressionTests.java | 4 +- .../painless/ImplementInterfacesTests.java | 22 +- .../elasticsearch/painless/LambdaTests.java | 4 +- .../elasticsearch/painless/RegexTests.java | 7 +- .../painless/ScriptTestCase.java | 38 --- .../elasticsearch/painless/StringTests.java | 4 +- .../painless/WhenThingsGoWrongTests.java | 121 +++------ .../elasticsearch/backwards/IndexingIT.java | 42 ++- .../test/indices.sort/10_basic.yaml | 75 ------ .../index/shard/IndexShardTestCase.java | 2 +- .../elasticsearch/test/ESIntegTestCase.java | 22 -- 61 files changed, 363 insertions(+), 2211 deletions(-) delete mode 100644 
core/src/main/java/org/elasticsearch/index/IndexSortConfig.java delete mode 100644 core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java delete mode 100644 core/src/test/java/org/elasticsearch/index/IndexSortIT.java delete mode 100644 core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java delete mode 100644 core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java delete mode 100644 core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java delete mode 100644 core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java delete mode 100644 docs/reference/index-modules/index-sorting.asciidoc delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 216100c07da39..43b53fd360f39 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -345,6 +345,7 @@ VM running trusty by running These are the linux flavors the Vagrantfile currently supports: +* ubuntu-1204 aka precise * ubuntu-1404 aka trusty * ubuntu-1604 aka xenial * debian-8 aka jessie, the current debian stable distribution @@ -430,7 +431,7 @@ gradle vagrantFedora24#up ------------------------------------------------- Or any of vagrantCentos6#up, vagrantDebian8#up, vagrantFedora24#up, vagrantOel6#up, -vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1404#up, +vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1204#up, vagrantUbuntu1604#up. Once up, you can then connect to the VM using SSH from the elasticsearch directory: diff --git a/Vagrantfile b/Vagrantfile index f008b339c3fa4..044394424047c 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -22,6 +22,10 @@ # under the License. 
Vagrant.configure(2) do |config| + config.vm.define "ubuntu-1204" do |config| + config.vm.box = "elastic/ubuntu-12.04-x86_64" + ubuntu_common config + end config.vm.define "ubuntu-1404" do |config| config.vm.box = "elastic/ubuntu-14.04-x86_64" ubuntu_common config diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 2fb047e93051d..336ee207abfb9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -22,6 +22,7 @@ class VagrantTestPlugin implements Plugin { 'oel-7', 'opensuse-13', 'sles-12', + 'ubuntu-1204', 'ubuntu-1404', 'ubuntu-1604' ] diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 43b1033044c8c..ed9463d1544e1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -19,10 +19,6 @@ package org.elasticsearch.action.admin.indices.segments; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.util.Accountable; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; @@ -41,7 +37,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.Locale; public class IndicesSegmentResponse extends BroadcastResponse implements ToXContent { @@ -145,9 +140,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) 
throws if (segment.getMergeId() != null) { builder.field(Fields.MERGE_ID, segment.getMergeId()); } - if (segment.getSegmentSort() != null) { - toXContent(builder, segment.getSegmentSort()); - } if (segment.ramTree != null) { builder.startArray(Fields.RAM_TREE); for (Accountable child : segment.ramTree.getChildResources()) { @@ -172,25 +164,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - static void toXContent(XContentBuilder builder, Sort sort) throws IOException { - builder.startArray("sort"); - for (SortField field : sort.getSort()) { - builder.startObject(); - builder.field("field", field.getField()); - if (field instanceof SortedNumericSortField) { - builder.field("mode", ((SortedNumericSortField) field).getSelector() - .toString().toLowerCase(Locale.ROOT)); - } else if (field instanceof SortedSetSortField) { - builder.field("mode", ((SortedSetSortField) field).getSelector() - .toString().toLowerCase(Locale.ROOT)); - } - builder.field("missing", field.getMissingValue()); - builder.field("reverse", field.getReverse()); - builder.endObject(); - } - builder.endArray(); - } - static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { builder.startObject(); builder.field(Fields.DESCRIPTION, tree.toString()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java index 6ea58200a4500..faa0a63c54dcf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java @@ -66,9 +66,6 @@ public ActionRequestValidationException validate() { if (shrinkIndexRequest == null) { validationException = addValidationError("shrink index request is missing", validationException); } - if (shrinkIndexRequest.settings().getByPrefix("index.sort.").isEmpty() == 
false) { - validationException = addValidationError("can't override index sort when shrinking index", validationException); - } return validationException; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 50da1476f49f3..3023ecb1856a4 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -38,8 +38,7 @@ public class BulkItemRequest implements Streamable { } - // NOTE: public for testing only - public BulkItemRequest(int id, DocWriteRequest request) { + protected BulkItemRequest(int id, DocWriteRequest request) { this.id = id; this.request = request; } @@ -57,11 +56,13 @@ public String index() { return request.indices()[0]; } - BulkItemResponse getPrimaryResponse() { + // NOTE: protected for testing only + protected BulkItemResponse getPrimaryResponse() { return primaryResponse; } - void setPrimaryResponse(BulkItemResponse primaryResponse) { + // NOTE: protected for testing only + protected void setPrimaryResponse(BulkItemResponse primaryResponse) { this.primaryResponse = primaryResponse; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 68cede5d25178..2e2a7f1540108 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -37,8 +37,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -173,34 +171,17 @@ public static class 
Failure implements Writeable, ToXContent { private final String id; private final Exception cause; private final RestStatus status; - private final long seqNo; - /** - * For write failures before operation was assigned a sequence number. - * - * use @{link {@link #Failure(String, String, String, Exception, long)}} - * to record operation sequence no with failure - */ - public Failure(String index, String type, String id, Exception cause) { - this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbersService.UNASSIGNED_SEQ_NO); - } - - public Failure(String index, String type, String id, Exception cause, RestStatus status) { - this(index, type, id, cause, status, SequenceNumbersService.UNASSIGNED_SEQ_NO); - } - - /** For write failures after operation was assigned a sequence number. */ - public Failure(String index, String type, String id, Exception cause, long seqNo) { - this(index, type, id, cause, ExceptionsHelper.status(cause), seqNo); - } - - public Failure(String index, String type, String id, Exception cause, RestStatus status, long seqNo) { + Failure(String index, String type, String id, Exception cause, RestStatus status) { this.index = index; this.type = type; this.id = id; this.cause = cause; this.status = status; - this.seqNo = seqNo; + } + + public Failure(String index, String type, String id, Exception cause) { + this(index, type, id, cause, ExceptionsHelper.status(cause)); } /** @@ -212,11 +193,6 @@ public Failure(StreamInput in) throws IOException { id = in.readOptionalString(); cause = in.readException(); status = ExceptionsHelper.status(cause); - if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - seqNo = in.readZLong(); - } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; - } } @Override @@ -225,9 +201,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(getType()); out.writeOptionalString(getId()); out.writeException(getCause()); - if 
(out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - out.writeZLong(getSeqNo()); - } } @@ -273,15 +246,6 @@ public Exception getCause() { return cause; } - /** - * The operation sequence number generated by primary - * NOTE: {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} - * indicates sequence number was not generated by primary - */ - public long getSeqNo() { - return seqNo; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(INDEX_FIELD, index); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 42010fdba77a8..e6f9516925878 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -44,6 +43,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -65,9 +65,13 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.index.translog.Translog.Location; +import org.elasticsearch.action.bulk.BulkItemResultHolder; +import 
org.elasticsearch.action.bulk.BulkItemResponse; import java.io.IOException; import java.util.Map; +import java.util.Objects; import java.util.function.LongSupplier; /** Performs shard-level bulk (index, delete or update) operations */ @@ -109,20 +113,12 @@ protected boolean resolveIndex() { @Override public WritePrimaryResult shardOperationOnPrimary( BulkShardRequest request, IndexShard primary) throws Exception { - return performOnPrimary(request, primary, updateHelper, threadPool::absoluteTimeInMillis, new ConcreteMappingUpdatePerformer()); - } - - public static WritePrimaryResult performOnPrimary( - BulkShardRequest request, - IndexShard primary, - UpdateHelper updateHelper, - LongSupplier nowInMillisSupplier, - MappingUpdatePerformer mappingUpdater) throws Exception { final IndexMetaData metaData = primary.indexSettings().getIndexMetaData(); Translog.Location location = null; + final MappingUpdatePerformer mappingUpdater = new ConcreteMappingUpdatePerformer(); for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) { location = executeBulkItemRequest(metaData, primary, request, location, requestIndex, - updateHelper, nowInMillisSupplier, mappingUpdater); + updateHelper, threadPool::absoluteTimeInMillis, mappingUpdater); } BulkItemResponse[] responses = new BulkItemResponse[request.items().length]; BulkItemRequest[] items = request.items(); @@ -133,6 +129,7 @@ public static WritePrimaryResult performOnP return new WritePrimaryResult<>(request, response, location, null, primary, logger); } + private static BulkItemResultHolder executeIndexRequest(final IndexRequest indexRequest, final BulkItemRequest bulkItemRequest, final IndexShard primary, @@ -211,8 +208,7 @@ static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResul // Make sure to use request.index() here, if you // use docWriteRequest.index() it will use the // concrete index instead of an alias if used! 
- new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), - failure, operationResult.getSeqNo())); + new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)); } else { assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response"; return null; @@ -225,7 +221,7 @@ static Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexSha BulkShardRequest request, Translog.Location location, int requestIndex, UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, - final MappingUpdatePerformer mappingUpdater) throws Exception { + final MappingUpdatePerformer mappingUpdater) throws Exception { final DocWriteRequest itemRequest = request.items()[requestIndex].request(); final DocWriteRequest.OpType opType = itemRequest.opType(); final BulkItemResultHolder responseHolder; @@ -376,129 +372,58 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq return new BulkItemResultHolder(updateResponse, result, replicaRequest); } - /** Modes for executing item request on replica depending on corresponding primary execution result */ - public enum ReplicaItemExecutionMode { - - /** - * When primary execution succeeded - */ - NORMAL, - - /** - * When primary execution failed before sequence no was generated - * or primary execution was a noop (only possible when request is originating from pre-6.0 nodes) - */ - NOOP, - - /** - * When primary execution failed after sequence no was generated - */ - FAILURE - } - - static { - assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_5_0_0) == false: - "Remove logic handling NoOp result from primary response; see TODO in replicaItemExecutionMode" + - " as the current minimum compatible version [" + - Version.CURRENT.minimumCompatibilityVersion() + "] is after 5.0"; - } - - /** - * Determines whether a bulk item request should be executed on the replica. 
- * @return {@link ReplicaItemExecutionMode#NORMAL} upon normal primary execution with no failures - * {@link ReplicaItemExecutionMode#FAILURE} upon primary execution failure after sequence no generation - * {@link ReplicaItemExecutionMode#NOOP} upon primary execution failure before sequence no generation or - * when primary execution resulted in noop (only possible for write requests from pre-6.0 nodes) - */ - static ReplicaItemExecutionMode replicaItemExecutionMode(final BulkItemRequest request, final int index) { + static boolean shouldExecuteReplicaItem(final BulkItemRequest request, final int index) { final BulkItemResponse primaryResponse = request.getPrimaryResponse(); - assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request [" + request.request() + "]"; - if (primaryResponse.isFailed()) { - return primaryResponse.getFailure().getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO - ? ReplicaItemExecutionMode.FAILURE // we have a seq no generated with the failure, replicate as no-op - : ReplicaItemExecutionMode.NOOP; // no seq no generated, ignore replication - } else { - // NOTE: write requests originating from pre-6.0 nodes can send a no-op operation to - // the replica; we ignore replication - // TODO: remove noOp result check from primary response, when pre-6.0 nodes are not supported - // we should return ReplicationItemExecutionMode.NORMAL instead - return primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP - ? 
ReplicaItemExecutionMode.NORMAL // execution successful on primary - : ReplicaItemExecutionMode.NOOP; // ignore replication - } + assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request ["+ request.request() +"]"; + return primaryResponse.isFailed() == false && + primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP; } @Override public WriteReplicaResult shardOperationOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { - final Translog.Location location = performOnReplica(request, replica); - return new WriteReplicaResult<>(request, location, null, replica, logger); - } - - public static Translog.Location performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; - final Engine.Result operationResult; - DocWriteRequest docWriteRequest = item.request(); - try { - switch (replicaItemExecutionMode(item, i)) { - case NORMAL: - final DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse(); - switch (docWriteRequest.opType()) { - case CREATE: - case INDEX: - operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica); - break; - case DELETE: - operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica); - break; - default: - throw new IllegalStateException("Unexpected request operation type on replica: " - + docWriteRequest.opType().getLowercase()); + if (shouldExecuteReplicaItem(item, i)) { + DocWriteRequest docWriteRequest = item.request(); + DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse(); + final Engine.Result operationResult; + try { + switch (docWriteRequest.opType()) { + case CREATE: + case INDEX: + operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) 
docWriteRequest, replica); + break; + case DELETE: + operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica); + break; + default: + throw new IllegalStateException("Unexpected request operation type on replica: " + + docWriteRequest.opType().getLowercase()); + } + if (operationResult.hasFailure()) { + // check if any transient write operation failures should be bubbled up + Exception failure = operationResult.getFailure(); + assert failure instanceof VersionConflictEngineException + || failure instanceof MapperParsingException + : "expected any one of [version conflict, mapper parsing, engine closed, index shard closed]" + + " failures. got " + failure; + if (!TransportActions.isShardNotAvailableException(failure)) { + throw failure; } - assert operationResult != null : "operation result must never be null when primary response has no failure"; - location = syncOperationResultOrThrow(operationResult, location); - break; - case NOOP: - break; - case FAILURE: - final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure(); - assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned"; - operationResult = executeFailureNoOpOnReplica(failure, replica); - assert operationResult != null : "operation result must never be null when primary response has no failure"; - location = syncOperationResultOrThrow(operationResult, location); - break; - default: - throw new IllegalStateException("illegal replica item execution mode for: " + item.request()); - } - } catch (Exception e) { - // if its not an ignore replica failure, we need to make sure to bubble up the failure - // so we will fail the shard - if (!TransportActions.isShardNotAvailableException(e)) { - throw e; + } else { + location = locationToSync(location, operationResult.getTranslogLocation()); + } + } catch (Exception e) { + // if its not an ignore replica failure, we need to make sure to bubble up the failure + 
// so we will fail the shard + if (!TransportActions.isShardNotAvailableException(e)) { + throw e; + } } } } - return location; - } - - /** Syncs operation result to the translog or throws a shard not available failure */ - private static Translog.Location syncOperationResultOrThrow(final Engine.Result operationResult, - final Translog.Location currentLocation) throws Exception { - final Translog.Location location; - if (operationResult.hasFailure()) { - // check if any transient write operation failures should be bubbled up - Exception failure = operationResult.getFailure(); - assert failure instanceof MapperParsingException : "expected mapper parsing failures. got " + failure; - if (!TransportActions.isShardNotAvailableException(failure)) { - throw failure; - } else { - location = currentLocation; - } - } else { - location = locationToSync(currentLocation, operationResult.getTranslogLocation()); - } - return location; + return new WriteReplicaResult<>(request, location, null, replica, logger); } private static Translog.Location locationToSync(Translog.Location current, @@ -518,7 +443,7 @@ private static Translog.Location locationToSync(Translog.Location current, * Execute the given {@link IndexRequest} on a replica shard, throwing a * {@link RetryOnReplicaException} if the operation needs to be re-tried. 
*/ - private static Engine.IndexResult executeIndexRequestOnReplica( + public static Engine.IndexResult executeIndexRequestOnReplica( DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException { @@ -561,7 +486,7 @@ static Engine.Index prepareIndexOperationOnReplica( } /** Utility method to prepare an index operation on primary shards */ - private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { + static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { final SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(), request.getContentType()) @@ -571,8 +496,8 @@ private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, } /** Executes index operation on primary shard after updates mapping if dynamic mappings are found */ - static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, - MappingUpdatePerformer mappingUpdater) throws Exception { + public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, + MappingUpdatePerformer mappingUpdater) throws Exception { // Update the mappings if parsing the documents includes new dynamic updates final Engine.Index preUpdateOperation; final Mapping mappingUpdate; @@ -622,12 +547,6 @@ private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteRespons return replica.delete(delete); } - private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, IndexShard replica) throws IOException { - final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOp( - primaryFailure.getSeqNo(), primaryFailure.getMessage()); - return replica.markSeqNoAsNoOp(noOp); - } - class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer { public void updateMappings(final Mapping update, 
final ShardId shardId, diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 938e90b82b2fb..ae4ae78c03386 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -94,10 +94,8 @@ protected abstract WriteReplicaResult shardOperationOnReplica( /** * Result of taking the action on the primary. - * - * NOTE: public for testing */ - public static class WritePrimaryResult, + protected static class WritePrimaryResult, Response extends ReplicationResponse & WriteResponse> extends PrimaryResult implements RespondingWriteResult { boolean finishedAsyncActions; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index a3292e2cfd445..2cb93373700f3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -374,18 +374,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { throw e; } - if (request.shrinkFrom() == null) { - // now that the mapping is merged we can validate the index sort. - // we cannot validate for index shrinking since the mapping is empty - // at this point. The validation will take place later in the process - // (when all shards are copied in a single place). 
- indexService.getIndexSortSupplier().get(); - } - // the context is only used for validation so it's fine to pass fake values for the shard id and the current // timestamp final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L); - for (Alias alias : request.aliases()) { if (Strings.hasLength(alias.filter())) { aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext, xContentRegistry); @@ -590,11 +581,10 @@ static List validateShrinkIndex(ClusterState state, String sourceIndex, static void prepareShrinkIndexSettings(ClusterState currentState, Set mappingKeys, Settings.Builder indexSettingsBuilder, Index shrinkFromIndex, String shrinkIntoName) { final IndexMetaData sourceMetaData = currentState.metaData().index(shrinkFromIndex.getName()); - final List nodesToAllocateOn = validateShrinkIndex(currentState, shrinkFromIndex.getName(), mappingKeys, shrinkIntoName, indexSettingsBuilder.build()); - final Predicate sourceSettingsPredicate = (s) -> s.startsWith("index.similarity.") - || s.startsWith("index.analysis.") || s.startsWith("index.sort."); + final Predicate analysisSimilarityPredicate = (s) -> s.startsWith("index.similarity.") + || s.startsWith("index.analysis."); indexSettingsBuilder // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away // once we are allocated. 
@@ -602,11 +592,11 @@ static void prepareShrinkIndexSettings(ClusterState currentState, Set ma Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())) // we only try once and then give up with a shrink index .put("index.allocation.max_retries", 1) - // now copy all similarity / analysis / sort settings - this overrides all settings from the user unless they + // now copy all similarity / analysis settings - this overrides all settings from the user unless they // wanna add extra settings .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion()) .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion()) - .put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)) + .put(sourceMetaData.getSettings().filter(analysisSimilarityPredicate)) .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize()) .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName()) .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID()); diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 4094d69eddeb8..efbe7acf5e1b6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -18,13 +18,13 @@ */ package org.elasticsearch.common.settings; -import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.settings.Setting.Property; 
+import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingSlowLog; @@ -100,10 +100,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, - IndexSortConfig.INDEX_SORT_FIELD_SETTING, - IndexSortConfig.INDEX_SORT_ORDER_SETTING, - IndexSortConfig.INDEX_SORT_MISSING_SETTING, - IndexSortConfig.INDEX_SORT_MODE_SETTING, IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING, IndexSettings.INDEX_WARMER_ENABLED_SETTING, IndexSettings.INDEX_REFRESH_INTERVAL_SETTING, diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 024c50fb6e090..92b20c5199b89 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -150,8 +150,7 @@ public DiscoveryNode tieBreakActiveMasters(Collection activeMaste } public boolean hasEnoughMasterNodes(Iterable nodes) { - final int count = countMasterNodes(nodes); - return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes); + return minimumMasterNodes < 1 || countMasterNodes(nodes) >= minimumMasterNodes; } public boolean hasTooManyMasterNodes(Iterable nodes) { diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 9a24f8eb68df7..e528dde7179b9 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -20,8 +20,8 @@ package org.elasticsearch.index; import 
org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.Sort; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.IOUtils; @@ -84,7 +84,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.LongSupplier; -import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -120,7 +119,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ScriptService scriptService; private final ClusterService clusterService; private final Client client; - private Supplier indexSortSupplier; public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, NamedXContentRegistry xContentRegistry, @@ -155,16 +153,6 @@ public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp"); })); this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); - if (indexSettings.getIndexSortConfig().hasIndexSort()) { - // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. - // The sort order is validated right after the merge of the mapping later in the process. 
- this.indexSortSupplier = () -> indexSettings.getIndexSortConfig().buildIndexSort( - mapperService::fullName, - indexFieldData::getForField - ); - } else { - this.indexSortSupplier = () -> null; - } this.shardStoreDeleter = shardStoreDeleter; this.bigArrays = bigArrays; this.threadPool = threadPool; @@ -255,10 +243,6 @@ public SimilarityService similarityService() { return similarityService; } - public Supplier getIndexSortSupplier() { - return indexSortSupplier; - } - public synchronized void close(final String reason, boolean delete) throws IOException { if (closed.compareAndSet(false, true)) { deleted.compareAndSet(false, delete); @@ -366,10 +350,10 @@ public synchronized IndexShard createShard(ShardRouting routing) throws IOExcept }; store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); - indexShard = new IndexShard(routing, this.indexSettings, path, store, indexSortSupplier, - indexCache, mapperService, similarityService, indexFieldData, engineFactory, - eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, - () -> globalCheckpointSyncer.accept(shardId), searchOperationListeners, indexingOperationListeners); + indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, + indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, + () -> globalCheckpointSyncer.accept(shardId), + searchOperationListeners, indexingOperationListeners); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap(); @@ -417,8 +401,7 @@ private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store final boolean flushEngine = deleted.get() == false && closed.get(); 
indexShard.close(reason, flushEngine); } catch (Exception e) { - logger.debug((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); // ignore } } diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 8acdf7d1360cb..011229256af65 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -98,11 +98,11 @@ public final class IndexSettings { Setting.intSetting("index.max_rescore_window", MAX_RESULT_WINDOW_SETTING, 1, Property.Dynamic, Property.IndexScope); /** * Index setting describing the maximum number of filters clauses that can be used - * in an adjacency_matrix aggregation. The max number of buckets produced by + * in an adjacency_matrix aggregation. The max number of buckets produced by * N filters is (N*N)/2 so a limit of 100 filters is imposed by default. 
*/ public static final Setting MAX_ADJACENCY_MATRIX_FILTERS_SETTING = - Setting.intSetting("index.max_adjacency_matrix_filters", 100, 2, Property.Dynamic, Property.IndexScope); + Setting.intSetting("index.max_adjacency_matrix_filters", 100, 2, Property.Dynamic, Property.IndexScope); public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); public static final Setting INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), @@ -176,7 +176,6 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; private final MergePolicyConfig mergePolicyConfig; - private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); private volatile boolean warmerEnabled; @@ -279,7 +278,6 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); this.mergePolicyConfig = new MergePolicyConfig(logger, this); - this.indexSortConfig = new IndexSortConfig(this); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed); @@ -501,7 +499,7 @@ public int getMaxResultWindow() { private void setMaxResultWindow(int maxResultWindow) { this.maxResultWindow = maxResultWindow; } - + /** * Returns the max number of filters in adjacency_matrix aggregation search requests */ @@ -511,7 +509,7 @@ public int getMaxAdjacencyMatrixFilters() { private void setMaxAdjacencyMatrixFilters(int maxAdjacencyFilters) { 
this.maxAdjacencyMatrixFilters = maxAdjacencyFilters; - } + } /** * Returns the maximum rescore window for search requests. @@ -576,12 +574,5 @@ private void setMaxSlicesPerScroll(int value) { this.maxSlicesPerScroll = value; } - /** - * Returns the index sort config that should be used for this index. - */ - public IndexSortConfig getIndexSortConfig() { - return indexSortConfig; - } - public IndexScopedSettings getScopedSettings() { return scopedSettings;} } diff --git a/core/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/core/src/main/java/org/elasticsearch/index/IndexSortConfig.java deleted file mode 100644 index 1d3f5f0fc23ea..0000000000000 --- a/core/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index; - -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; -import org.elasticsearch.Version; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.sort.SortOrder; - -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.function.Function; - -/** - * Holds all the information that is used to build the sort order of an index. - * - * The index sort settings are final and can be defined only at index creation. - * These settings are divided in four lists that are merged during the initialization of this class: - *
    - *
  • `index.sort.field`: the field or a list of field to use for the sort
  • - *
  • `index.sort.order` the {@link SortOrder} to use for the field or a list of {@link SortOrder} - * for each field defined in `index.sort.field`. - *
  • - *
  • `index.sort.mode`: the {@link MultiValueMode} to use for the field or a list of orders - * for each field defined in `index.sort.field`. - *
  • - *
  • `index.sort.missing`: the missing value to use for the field or a list of missing values - * for each field defined in `index.sort.field` - *
  • - *
- * -**/ -public final class IndexSortConfig { - /** - * The list of field names - */ - public static final Setting> INDEX_SORT_FIELD_SETTING = - Setting.listSetting("index.sort.field", Collections.emptyList(), - Function.identity(), Setting.Property.IndexScope, Setting.Property.Final); - - /** - * The {@link SortOrder} for each specified sort field (ie. asc or desc). - */ - public static final Setting> INDEX_SORT_ORDER_SETTING = - Setting.listSetting("index.sort.order", Collections.emptyList(), - IndexSortConfig::parseOrderMode, Setting.Property.IndexScope, Setting.Property.Final); - - - /** - * The {@link MultiValueMode} for each specified sort field (ie. max or min). - */ - public static final Setting> INDEX_SORT_MODE_SETTING = - Setting.listSetting("index.sort.mode", Collections.emptyList(), - IndexSortConfig::parseMultiValueMode, Setting.Property.IndexScope, Setting.Property.Final); - - /** - * The missing value for each specified sort field (ie. _first or _last) - */ - public static final Setting> INDEX_SORT_MISSING_SETTING = - Setting.listSetting("index.sort.missing", Collections.emptyList(), - IndexSortConfig::validateMissingValue, Setting.Property.IndexScope, Setting.Property.Final); - - private static String validateMissingValue(String missing) { - if ("_last".equals(missing) == false && "_first".equals(missing) == false) { - throw new IllegalArgumentException("Illegal missing value:[" + missing + "], " + - "must be one of [_last, _first]"); - } - return missing; - } - - private static SortOrder parseOrderMode(String value) { - try { - return SortOrder.fromString(value); - } catch (Exception e) { - throw new IllegalArgumentException("Illegal sort order:" + value); - } - } - - private static MultiValueMode parseMultiValueMode(String value) { - MultiValueMode mode = MultiValueMode.fromString(value); - if (mode != MultiValueMode.MAX && mode != MultiValueMode.MIN) { - throw new IllegalArgumentException("Illegal index sort mode:[" + mode + "], " + - "must be 
one of [" + MultiValueMode.MAX + ", " + MultiValueMode.MIN + "]"); - } - return mode; - } - - // visible for tests - final FieldSortSpec[] sortSpecs; - - public IndexSortConfig(IndexSettings indexSettings) { - final Settings settings = indexSettings.getSettings(); - List fields = INDEX_SORT_FIELD_SETTING.get(settings); - this.sortSpecs = fields.stream() - .map((name) -> new FieldSortSpec(name)) - .toArray(FieldSortSpec[]::new); - - if (sortSpecs.length > 0 && indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED)) { - /** - * This index might be assigned to a node where the index sorting feature is not available - * (ie. versions prior to {@link Version.V_6_0_0_alpha1_UNRELEASED}) so we must fail here rather than later. - */ - throw new IllegalArgumentException("unsupported index.version.created:" + indexSettings.getIndexVersionCreated() + - ", can't set index.sort on versions prior to " + Version.V_6_0_0_alpha1_UNRELEASED); - } - - if (INDEX_SORT_ORDER_SETTING.exists(settings)) { - List orders = INDEX_SORT_ORDER_SETTING.get(settings); - if (orders.size() != sortSpecs.length) { - throw new IllegalArgumentException("index.sort.field:" + fields + - " index.sort.order:" + orders.toString() + ", size mismatch"); - } - for (int i = 0; i < sortSpecs.length; i++) { - sortSpecs[i].order = orders.get(i); - } - } - - if (INDEX_SORT_MODE_SETTING.exists(settings)) { - List modes = INDEX_SORT_MODE_SETTING.get(settings); - if (modes.size() != sortSpecs.length) { - throw new IllegalArgumentException("index.sort.field:" + fields + - " index.sort.mode:" + modes + ", size mismatch"); - } - for (int i = 0; i < sortSpecs.length; i++) { - sortSpecs[i].mode = modes.get(i); - } - } - - if (INDEX_SORT_MISSING_SETTING.exists(settings)) { - List missingValues = INDEX_SORT_MISSING_SETTING.get(settings); - if (missingValues.size() != sortSpecs.length) { - throw new IllegalArgumentException("index.sort.field:" + fields + - " index.sort.missing:" + missingValues + ", 
size mismatch"); - } - for (int i = 0; i < sortSpecs.length; i++) { - sortSpecs[i].missingValue = missingValues.get(i); - } - } - } - - - /** - * Returns true if the index should be sorted - */ - public boolean hasIndexSort() { - return sortSpecs.length > 0; - } - - /** - * Builds the {@link Sort} order from the settings for this index - * or returns null if this index has no sort. - */ - public Sort buildIndexSort(Function fieldTypeLookup, - Function> fieldDataLookup) { - if (hasIndexSort() == false) { - return null; - } - - final SortField[] sortFields = new SortField[sortSpecs.length]; - for (int i = 0; i < sortSpecs.length; i++) { - FieldSortSpec sortSpec = sortSpecs[i]; - final MappedFieldType ft = fieldTypeLookup.apply(sortSpec.field); - if (ft == null) { - throw new IllegalArgumentException("unknown index sort field:[" + sortSpec.field + "]"); - } - boolean reverse = sortSpec.order == null ? false : (sortSpec.order == SortOrder.DESC); - MultiValueMode mode = sortSpec.mode; - if (mode == null) { - mode = reverse ? 
MultiValueMode.MAX : MultiValueMode.MIN; - } - IndexFieldData fieldData; - try { - fieldData = fieldDataLookup.apply(ft); - } catch (Exception e) { - throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); - } - if (fieldData == null) { - throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); - } - sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); - validateIndexSortField(sortFields[i]); - } - return new Sort(sortFields); - } - - private void validateIndexSortField(SortField sortField) { - SortField.Type type = getSortFieldType(sortField); - if (ALLOWED_INDEX_SORT_TYPES.contains(type) == false) { - throw new IllegalArgumentException("invalid index sort field:[" + sortField.getField() + "]"); - } - } - - static class FieldSortSpec { - final String field; - SortOrder order; - MultiValueMode mode; - String missingValue; - - FieldSortSpec(String field) { - this.field = field; - } - } - - /** We only allow index sorting on these types */ - private static final EnumSet ALLOWED_INDEX_SORT_TYPES = EnumSet.of( - SortField.Type.STRING, - SortField.Type.LONG, - SortField.Type.INT, - SortField.Type.DOUBLE, - SortField.Type.FLOAT - ); - - static SortField.Type getSortFieldType(SortField sortField) { - if (sortField instanceof SortedSetSortField) { - return SortField.Type.STRING; - } else if (sortField instanceof SortedNumericSortField) { - return ((SortedNumericSortField) sortField).getNumericType(); - } else { - return sortField.getType(); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index d9762afd22c91..31ba05817b756 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -363,6 +363,7 @@ public Operation.TYPE getOperationType() { void 
setTranslogLocation(Translog.Location translogLocation) { if (freeze.get() == null) { + assert failure == null : "failure has to be null to set translog location"; this.translogLocation = translogLocation; } else { throw new IllegalStateException("result is already frozen"); @@ -431,7 +432,7 @@ public boolean isFound() { } - public static class NoOpResult extends Result { + static class NoOpResult extends Result { NoOpResult(long seqNo) { super(Operation.TYPE.NO_OP, 0, seqNo); @@ -705,7 +706,6 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole } final SegmentReader segmentReader = segmentReader(reader.reader()); segment.memoryInBytes = segmentReader.ramBytesUsed(); - segment.segmentSort = info.info.getIndexSort(); if (verbose) { segment.ramTree = Accountables.namedAccountable("root", segmentReader); } @@ -1154,31 +1154,24 @@ public String reason() { return reason; } - public NoOp(final long seqNo, final long primaryTerm, final Origin origin, final long startTime, final String reason) { - super(null, seqNo, primaryTerm, Versions.NOT_FOUND, null, origin, startTime); + public NoOp( + final Term uid, + final long seqNo, + final long primaryTerm, + final long version, + final VersionType versionType, + final Origin origin, + final long startTime, + final String reason) { + super(uid, seqNo, primaryTerm, version, versionType, origin, startTime); this.reason = reason; } - @Override - public Term uid() { - throw new UnsupportedOperationException(); - } - @Override public String type() { throw new UnsupportedOperationException(); } - @Override - public long version() { - throw new UnsupportedOperationException(); - } - - @Override - public VersionType versionType() { - throw new UnsupportedOperationException(); - } - @Override String id() { throw new UnsupportedOperationException(); diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 
7852d2c2db089..60dddc4d40db1 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.search.Sort; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; @@ -70,8 +69,6 @@ public final class EngineConfig { private final long maxUnsafeAutoIdTimestamp; @Nullable private final ReferenceManager.RefreshListener refreshListeners; - @Nullable - private final Sort indexSort; /** * Index setting to change the low level lucene codec used for writing new segments. @@ -116,7 +113,7 @@ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig, TimeValue flushMergesAfter, ReferenceManager.RefreshListener refreshListeners, - long maxUnsafeAutoIdTimestamp, Sort indexSort) { + long maxUnsafeAutoIdTimestamp) { if (openMode == null) { throw new IllegalArgumentException("openMode must not be null"); } @@ -146,7 +143,6 @@ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, assert maxUnsafeAutoIdTimestamp >= IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP : "maxUnsafeAutoIdTimestamp must be >= -1 but was " + maxUnsafeAutoIdTimestamp; this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp; - this.indexSort = indexSort; } /** @@ -339,11 +335,4 @@ public ReferenceManager.RefreshListener getRefreshListeners() { public long getMaxUnsafeAutoIdTimestamp() { return indexSettings.getValue(INDEX_OPTIMIZE_AUTO_GENERATED_IDS) ? 
maxUnsafeAutoIdTimestamp : Long.MAX_VALUE; } - - /** - * Return the sort order of this index, or null if the index has no sort. - */ - public Sort getIndexSort() { - return indexSort; - } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5e47331a78a42..107430b0a7405 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -614,16 +614,10 @@ public IndexResult index(Index index) throws IOException { indexResult = new IndexResult( plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); } - if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { - final Translog.Location location; - if (indexResult.hasFailure() == false) { - location = translog.add(new Translog.Index(index, indexResult)); - } else if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { - // if we have document failure, record it as a no-op in the translog with the generated seq_no - location = translog.add(new Translog.NoOp(indexResult.getSeqNo(), index.primaryTerm(), indexResult.getFailure().getMessage())); - } else { - location = null; - } + if (indexResult.hasFailure() == false && + index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + Translog.Location location = + translog.add(new Translog.Index(index, indexResult)); indexResult.setTranslogLocation(location); } if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { @@ -754,7 +748,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) * we return a `MATCH_ANY` version to indicate no document was index. 
The value is * not used anyway */ - return new IndexResult(ex, Versions.MATCH_ANY, plan.seqNoForIndexing); + return new IndexResult(ex, Versions.MATCH_ANY, index.seqNo()); } else { throw ex; } @@ -904,16 +898,10 @@ public DeleteResult delete(Delete delete) throws IOException { deleteResult = new DeleteResult( plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } - if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { - final Translog.Location location; - if (deleteResult.hasFailure() == false) { - location = translog.add(new Translog.Delete(delete, deleteResult)); - } else if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { - location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(), - delete.primaryTerm(), deleteResult.getFailure().getMessage())); - } else { - location = null; - } + if (!deleteResult.hasFailure() && + delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + Translog.Location location = + translog.add(new Translog.Delete(delete, deleteResult)); deleteResult.setTranslogLocation(location); } if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { @@ -1602,9 +1590,6 @@ private IndexWriterConfig getIndexWriterConfig(boolean create) { iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); iwc.setCodec(engineConfig.getCodec()); iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh - if (config().getIndexSort() != null) { - iwc.setIndexSort(config().getIndexSort()); - } return iwc; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Segment.java b/core/src/main/java/org/elasticsearch/index/engine/Segment.java index 565ed9f1d83f5..7d3882fd9b654 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -19,15 +19,8 @@ package org.elasticsearch.index.engine; -import org.apache.lucene.search.Sort; 
-import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedSetSortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSelector; -import org.apache.lucene.search.SortedNumericSelector; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -53,7 +46,6 @@ public class Segment implements Streamable { public Boolean compound = null; public String mergeId; public long memoryInBytes; - public Sort segmentSort; public Accountable ramTree = null; Segment() { @@ -121,13 +113,6 @@ public long getMemoryInBytes() { return this.memoryInBytes; } - /** - * Return the sort order of this segment, or null if the segment has no sort. - */ - public Sort getSegmentSort() { - return segmentSort; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -168,11 +153,6 @@ public void readFrom(StreamInput in) throws IOException { // verbose mode ramTree = readRamTree(in); } - if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - segmentSort = readSegmentSort(in); - } else { - segmentSort = null; - } } @Override @@ -187,106 +167,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(compound); out.writeOptionalString(mergeId); out.writeLong(memoryInBytes); - + boolean verbose = ramTree != null; out.writeBoolean(verbose); if (verbose) { writeRamTree(out, ramTree); } - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - writeSegmentSort(out, segmentSort); - } - } - - Sort readSegmentSort(StreamInput in) throws IOException { - int size = in.readVInt(); - if (size == 0) { - return null; - } - SortField[] fields = new SortField[size]; - for (int i = 0; i < size; i++) { - String field = in.readString(); - byte 
type = in.readByte(); - if (type == 0) { - Boolean missingFirst = in.readOptionalBoolean(); - boolean max = in.readBoolean(); - boolean reverse = in.readBoolean(); - fields[i] = new SortedSetSortField(field, reverse, - max ? SortedSetSelector.Type.MAX : SortedSetSelector.Type.MIN); - if (missingFirst != null) { - fields[i].setMissingValue(missingFirst ? - SortedSetSortField.STRING_FIRST : SortedSetSortField.STRING_LAST); - } - } else { - Object missing = in.readGenericValue(); - boolean max = in.readBoolean(); - boolean reverse = in.readBoolean(); - final SortField.Type numericType; - switch (type) { - case 1: - numericType = SortField.Type.INT; - break; - case 2: - numericType = SortField.Type.FLOAT; - break; - case 3: - numericType = SortField.Type.DOUBLE; - break; - case 4: - numericType = SortField.Type.LONG; - break; - default: - throw new IOException("invalid index sort type:[" + type + - "] for numeric field:[" + field + "]"); - } - fields[i] = new SortedNumericSortField(field, numericType, reverse, max ? - SortedNumericSelector.Type.MAX : SortedNumericSelector.Type.MIN); - if (missing != null) { - fields[i].setMissingValue(missing); - } - } - } - return new Sort(fields); - } - - void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { - if (sort == null) { - out.writeVInt(0); - return; - } - out.writeVInt(sort.getSort().length); - for (SortField field : sort.getSort()) { - out.writeString(field.getField()); - if (field instanceof SortedSetSortField) { - out.writeByte((byte) 0); - out.writeOptionalBoolean(field.getMissingValue() == null ? 
- null : field.getMissingValue() == SortField.STRING_FIRST); - out.writeBoolean(((SortedSetSortField) field).getSelector() == SortedSetSelector.Type.MAX); - out.writeBoolean(field.getReverse()); - } else if (field instanceof SortedNumericSortField) { - switch (((SortedNumericSortField) field).getNumericType()) { - case INT: - out.writeByte((byte) 1); - break; - case FLOAT: - out.writeByte((byte) 2); - break; - case DOUBLE: - out.writeByte((byte) 3); - break; - case LONG: - out.writeByte((byte) 4); - break; - default: - throw new IOException("invalid index sort field:" + field); - } - out.writeGenericValue(field.getMissingValue()); - out.writeBoolean(((SortedNumericSortField) field).getSelector() == SortedNumericSelector.Type.MAX); - out.writeBoolean(field.getReverse()); - } else { - throw new IOException("invalid index sort field:" + field + ""); - } - } } Accountable readRamTree(StreamInput in) throws IOException { @@ -302,7 +188,7 @@ Accountable readRamTree(StreamInput in) throws IOException { } return Accountables.namedAccountable(name, children, bytes); } - + // the ram tree is written recursively since the depth is fairly low (5 or 6) void writeRamTree(StreamOutput out, Accountable tree) throws IOException { out.writeString(tree.toString()); @@ -328,7 +214,6 @@ public String toString() { ", compound=" + compound + ", mergeId='" + mergeId + '\'' + ", memoryInBytes=" + memoryInBytes + - (segmentSort != null ? 
", sort=" + segmentSort : "") + '}'; } -} +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 55cfebe41c1db..68983bcf63ff4 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.query.QueryShardContext; @@ -439,7 +438,6 @@ private synchronized Map internalMerge(@Nullable Documen checkNestedFieldsLimit(fullPathObjectMappers); checkDepthLimit(fullPathObjectMappers.keySet()); } - checkIndexSortCompatibility(indexSettings.getIndexSortConfig(), hasNested); for (Map.Entry entry : mappers.entrySet()) { if (entry.getKey().equals(DEFAULT_MAPPING)) { @@ -621,12 +619,6 @@ private void checkPartitionedIndexConstraints(DocumentMapper newMapper) { } } - private void checkIndexSortCompatibility(IndexSortConfig sortConfig, boolean hasNested) { - if (sortConfig.hasIndexSort() && hasNested) { - throw new IllegalArgumentException("cannot have nested fields when index sort is activated"); - } - } - public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException { return documentParser.parse(mappingType, mappingSource, applyDefault ? 
defaultMappingSource : null); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d1ca4f13a42da..1dee58ced002b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -31,7 +31,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.Sort; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Lock; @@ -146,7 +145,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; -import java.util.function.Supplier; import java.util.stream.Collectors; public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { @@ -172,7 +170,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final TranslogConfig translogConfig; private final IndexEventListener indexEventListener; private final QueryCachingPolicy cachingPolicy; - private final Supplier indexSortSupplier; + /** * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. 
IndexingMemoryController polls this @@ -227,9 +225,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl */ private final RefreshListeners refreshListeners; - public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, - Supplier indexSortSupplier, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, - IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory, + public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, + MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, + @Nullable EngineFactory engineFactory, IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, ThreadPool threadPool, BigArrays bigArrays, Engine.Warmer warmer, Runnable globalCheckpointSyncer, List searchOperationListener, List listeners) throws IOException { super(shardRouting.shardId(), indexSettings); @@ -243,7 +241,6 @@ public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardP Objects.requireNonNull(store, "Store must be provided to the index shard"); this.engineFactory = engineFactory == null ? new InternalEngineFactory() : engineFactory; this.store = store; - this.indexSortSupplier = indexSortSupplier; this.indexEventListener = indexEventListener; this.threadPool = threadPool; this.mapperService = mapperService; @@ -292,12 +289,6 @@ public Store store() { return this.store; } - /** - * Return the sort order of this index, or null if the index has no sort. - */ - public Sort getIndexSort() { - return indexSortSupplier.get(); - } /** * returns true if this shard supports indexing (i.e., write) operations. 
*/ @@ -578,21 +569,12 @@ private Engine.IndexResult index(Engine engine, Engine.Index index) throws IOExc return result; } - public Engine.NoOp prepareMarkingSeqNoAsNoOp(long seqNo, String reason) { - verifyReplicationTarget(); - long startTime = System.nanoTime(); - return new Engine.NoOp(seqNo, primaryTerm, Engine.Operation.Origin.REPLICA, startTime, reason); - } - - public Engine.NoOpResult markSeqNoAsNoOp(Engine.NoOp noOp) throws IOException { - ensureWriteAllowed(noOp); - Engine engine = getEngine(); - return engine.noOp(noOp); - } - public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) { verifyPrimary(); - final Term uid = extractUid(type, id); + final DocumentMapper documentMapper = docMapper(type).getDocumentMapper(); + final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType(); + final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null); + final Term uid = MappedFieldType.extractTerm(uidQuery); return prepareDelete(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, Engine.Operation.Origin.PRIMARY); } @@ -600,12 +582,15 @@ public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version public Engine.Delete prepareDeleteOnReplica(String type, String id, long seqNo, long primaryTerm, long version, VersionType versionType) { verifyReplicationTarget(); - final Term uid = extractUid(type, id); + final DocumentMapper documentMapper = docMapper(type).getDocumentMapper(); + final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType(); + final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null); + final Term uid = MappedFieldType.extractTerm(uidQuery); return prepareDelete(type, id, uid, seqNo, primaryTerm, version, versionType, Engine.Operation.Origin.REPLICA); } - private static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, - 
VersionType versionType, Engine.Operation.Origin origin) { + static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, + VersionType versionType, Engine.Operation.Origin origin) { long startTime = System.nanoTime(); return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime); } @@ -616,13 +601,6 @@ public Engine.DeleteResult delete(Engine.Delete delete) throws IOException { return delete(engine, delete); } - private Term extractUid(String type, String id) { - final DocumentMapper documentMapper = docMapper(type).getDocumentMapper(); - final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType(); - final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null); - return MappedFieldType.extractTerm(uidQuery); - } - private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException { active.set(true); final Engine.DeleteResult result; @@ -1797,12 +1775,11 @@ private DocumentMapperForType docMapper(String type) { private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode, long maxUnsafeAutoIdTimestamp) { final IndexShardRecoveryPerformer translogRecoveryPerformer = new IndexShardRecoveryPerformer(shardId, mapperService, logger); - Sort indexSort = indexSortSupplier.get(); return new EngineConfig(openMode, shardId, threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners, - maxUnsafeAutoIdTimestamp, indexSort); + maxUnsafeAutoIdTimestamp); } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java 
index 6cfaca8c45b4b..04c2113dea34b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -25,7 +25,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.search.Sort; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; @@ -110,14 +109,11 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate mappingUpdateConsumer.accept(mapping.key, mapping.value); } indexShard.mapperService().merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); - // now that the mapping is merged we can validate the index sort configuration. - Sort indexSort = indexShard.getIndexSort(); return executeRecovery(indexShard, () -> { logger.debug("starting recovery from local shards {}", shards); try { final Directory directory = indexShard.store().directory(); // don't close this directory!! - addIndices(indexShard.recoveryState().getIndex(), directory, indexSort, - shards.stream().map(s -> s.getSnapshotDirectory()) + addIndices(indexShard.recoveryState().getIndex(), directory, shards.stream().map(s -> s.getSnapshotDirectory()) .collect(Collectors.toList()).toArray(new Directory[shards.size()])); internalRecoverFromStore(indexShard); // just trigger a merge to do housekeeping on the @@ -132,19 +128,16 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate return false; } - void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Sort indexSort, Directory... sources) throws IOException { + void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Directory... 
sources) throws IOException { target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); - IndexWriterConfig iwc = new IndexWriterConfig(null) - .setCommitOnClose(false) - // we don't want merges to happen here - we call maybe merge on the engine - // later once we stared it up otherwise we would need to wait for it here - // we also don't specify a codec here and merges should use the engines for this index - .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(IndexWriterConfig.OpenMode.CREATE); - if (indexSort != null) { - iwc.setIndexSort(indexSort); - } - try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats), iwc)) { + try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats), + new IndexWriterConfig(null) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we stared it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) { writer.addIndexes(sources); writer.commit(); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index 8842cbf3c0bd4..d5aadc1664ea4 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException; import 
org.elasticsearch.index.mapper.DocumentMapperForType; @@ -30,6 +31,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.rest.RestStatus; @@ -180,7 +182,7 @@ private void performRecoveryOperation(Engine engine, Translog.Operation operatio final String reason = noOp.reason(); logger.trace("[translog] recover [no_op] op [({}, {})] of [{}]", seqNo, primaryTerm, reason); final Engine.NoOp engineNoOp = - new Engine.NoOp(seqNo, primaryTerm, origin, System.nanoTime(), reason); + new Engine.NoOp(null, seqNo, primaryTerm, 0, VersionType.INTERNAL, origin, System.nanoTime(), reason); noOp(engine, engineNoOp); break; default: diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java deleted file mode 100644 index be22679a4e1bf..0000000000000 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; - -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; - -public interface BucketMetricValue extends NumericMetricsAggregation.SingleValue { - - String[] keys(); -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java index 9c9da2f26bd53..76284d275553f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java @@ -31,7 +31,7 @@ import java.util.List; import java.util.Map; -public class InternalBucketMetricValue extends InternalNumericMetricsAggregation.SingleValue implements BucketMetricValue { +public class InternalBucketMetricValue extends InternalNumericMetricsAggregation.SingleValue { public static final String NAME = "bucket_metric_value"; private double value; @@ -72,7 +72,6 @@ public double value() { return value; } - @Override public String[] keys() { return keys; } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index dea04d1710685..9ab6551d6fd07 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -19,17 +19,9 @@ package org.elasticsearch.action.admin.indices.create; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedSetSelector; -import 
org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.segments.IndexSegments; -import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; -import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterInfoService; @@ -41,7 +33,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -53,7 +44,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.containsString; public class ShrinkIndexIT extends ESIntegTestCase { @@ -260,76 +250,4 @@ public void testCreateShrinkIndexFails() throws Exception { ensureGreen(); assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); } - - public void testCreateShrinkWithIndexSort() throws Exception { - SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX); - expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST); - Sort expectedIndexSort = new Sort(expectedSortField); - internalCluster().ensureAtLeastNumDataNodes(2); - prepareCreate("source") - .setSettings( - 
Settings.builder() - .put(indexSettings()) - .put("sort.field", "id") - .put("sort.order", "desc") - .put("number_of_shards", 8) - .put("number_of_replicas", 0) - ) - .addMapping("t1", "id", "type=keyword,doc_values=true") - .get(); - for (int i = 0; i < 20; i++) { - client().prepareIndex("source", "t1", Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); - } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); - DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); - String mergeNode = discoveryNodes[0].getName(); - // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node - // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due - // to the require._name below. - ensureGreen(); - - flushAndRefresh(); - assertSortedSegments("source", expectedIndexSort); - - // relocate all shards to one node such that we can merge it. 
- client().admin().indices().prepareUpdateSettings("source") - .setSettings(Settings.builder() - .put("index.routing.allocation.require._name", mergeNode) - .put("index.blocks.write", true)).get(); - ensureGreen(); - - // check that index sort cannot be set on the target index - IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, - () -> client().admin().indices().prepareShrinkIndex("source", "target") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", "2") - .put("index.sort.field", "foo") - .build()).get()); - assertThat(exc.getMessage(), containsString("can't override index sort when shrinking index")); - - // check that the index sort order of `source` is correctly applied to the `target` - assertAcked(client().admin().indices().prepareShrinkIndex("source", "target") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", "2").build()).get()); - ensureGreen(); - flushAndRefresh(); - GetSettingsResponse settingsResponse = - client().admin().indices().prepareGetSettings("target").execute().actionGet(); - assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); - assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); - assertSortedSegments("target", expectedIndexSort); - - // ... 
and that the index sort is also applied to updates - for (int i = 20; i < 40; i++) { - client().prepareIndex("target", randomFrom("t1", "t2", "t3")) - .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); - } - flushAndRefresh(); - assertSortedSegments("target", expectedIndexSort); - } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index a444f0d6c6cd1..a4bf0d77a1c26 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.bulk.TransportShardBulkAction.ReplicaItemExecutionMode; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -34,9 +34,14 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -47,12 +52,15 @@ import 
org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.rest.RestStatus; -import org.mockito.ArgumentCaptor; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.bulk.MappingUpdatePerformer; +import org.elasticsearch.action.bulk.BulkItemResultHolder; import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import static org.elasticsearch.action.bulk.TransportShardBulkAction.replicaItemExecutionMode; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.containsString; @@ -88,38 +96,26 @@ public void testShouldExecuteReplicaItem() throws Exception { DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean()); BulkItemRequest request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, response)); - assertThat(replicaItemExecutionMode(request, 0), - equalTo(ReplicaItemExecutionMode.NORMAL)); + assertTrue(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); - // Failed index requests without sequence no should not be replicated + // Failed index requests should not be replicated (for now!) 
writeRequest = new IndexRequest("index", "type", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean()); request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse( new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, new BulkItemResponse.Failure("index", "type", "id", new IllegalArgumentException("i died")))); - assertThat(replicaItemExecutionMode(request, 0), - equalTo(ReplicaItemExecutionMode.NOOP)); + assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); - // Failed index requests with sequence no should be replicated - request = new BulkItemRequest(0, writeRequest); - request.setPrimaryResponse( - new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, - new BulkItemResponse.Failure("index", "type", "id", - new IllegalArgumentException( - "i died after sequence no was generated"), - 1))); - assertThat(replicaItemExecutionMode(request, 0), - equalTo(ReplicaItemExecutionMode.FAILURE)); // NOOP requests should not be replicated writeRequest = new UpdateRequest("index", "type", "id"); response = new UpdateResponse(shardId, "type", "id", 1, DocWriteResponse.Result.NOOP); request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.UPDATE, response)); - assertThat(replicaItemExecutionMode(request, 0), - equalTo(ReplicaItemExecutionMode.NOOP)); + assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); } @@ -521,35 +517,6 @@ public void testCalculateTranslogLocation() throws Exception { } - public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception { - final IndexShard shard = spy(newStartedShard(false)); - BulkItemRequest itemRequest = new BulkItemRequest(0, - new IndexRequest("index", "type") - .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") - ); - final String failureMessage = "simulated primary failure"; - itemRequest.setPrimaryResponse(new 
BulkItemResponse(0, - randomFrom( - DocWriteRequest.OpType.CREATE, - DocWriteRequest.OpType.DELETE, - DocWriteRequest.OpType.INDEX - ), - new BulkItemResponse.Failure("index", "type", "1", - new IOException(failureMessage), 1L) - )); - BulkItemRequest[] itemRequests = new BulkItemRequest[1]; - itemRequests[0] = itemRequest; - BulkShardRequest bulkShardRequest = new BulkShardRequest( - shard.shardId(), RefreshPolicy.NONE, itemRequests); - TransportShardBulkAction.performOnReplica(bulkShardRequest, shard); - ArgumentCaptor noOp = ArgumentCaptor.forClass(Engine.NoOp.class); - verify(shard, times(1)).markSeqNoAsNoOp(noOp.capture()); - final Engine.NoOp noOpValue = noOp.getValue(); - assertThat(noOpValue.seqNo(), equalTo(1L)); - assertThat(noOpValue.reason(), containsString(failureMessage)); - closeShards(shard); - } - public void testMappingUpdateParsesCorrectNumberOfTimes() throws Exception { IndexMetaData metaData = indexMetaData(); logger.info("--> metadata.getIndex(): {}", metaData.getIndex()); diff --git a/core/src/test/java/org/elasticsearch/index/IndexSortIT.java b/core/src/test/java/org/elasticsearch/index/IndexSortIT.java deleted file mode 100644 index bb59bc948805c..0000000000000 --- a/core/src/test/java/org/elasticsearch/index/IndexSortIT.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index; - -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESIntegTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.containsString; - -public class IndexSortIT extends ESIntegTestCase { - private static final XContentBuilder TEST_MAPPING = createTestMapping(); - - private static XContentBuilder createTestMapping() { - try { - return jsonBuilder() - .startObject() - .startObject("properties") - .startObject("date") - .field("type", "date") - .endObject() - .startObject("numeric") - .field("type", "integer") - .field("doc_values", false) - .endObject() - .startObject("numeric_dv") - .field("type", "integer") - .field("doc_values", true) - .endObject() - .startObject("keyword_dv") - .field("type", "keyword") - .field("doc_values", true) - .endObject() - .startObject("keyword") - .field("type", "keyword") - .field("doc_values", false) - .endObject() - .endObject().endObject(); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - - public void testIndexSort() { - SortField dateSort = new SortedNumericSortField("date", SortField.Type.LONG, false); - dateSort.setMissingValue(Long.MAX_VALUE); - SortField numericSort = new SortedNumericSortField("numeric_dv", SortField.Type.LONG, false); - numericSort.setMissingValue(Long.MAX_VALUE); - SortField keywordSort = new SortedSetSortField("keyword_dv", false); - keywordSort.setMissingValue(SortField.STRING_LAST); - Sort 
indexSort = new Sort(dateSort, numericSort, keywordSort); - prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .put("index.number_of_shards", "1") - .put("index.number_of_replicas", "1") - .putArray("index.sort.field", "date", "numeric_dv", "keyword_dv") - ) - .addMapping("test", TEST_MAPPING) - .get(); - for (int i = 0; i < 20; i++) { - client().prepareIndex("test", "test", Integer.toString(i)) - .setSource("numeric_dv", randomInt(), "keyword_dv", randomAlphaOfLengthBetween(10, 20)) - .get(); - } - flushAndRefresh(); - ensureYellow(); - assertSortedSegments("test", indexSort); - } - - public void testInvalidIndexSort() { - IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, - () -> prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .putArray("index.sort.field", "invalid_field") - ) - .addMapping("test", TEST_MAPPING) - .get() - ); - assertThat(exc.getMessage(), containsString("unknown index sort field:[invalid_field]")); - - exc = expectThrows(IllegalArgumentException.class, - () -> prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .putArray("index.sort.field", "numeric") - ) - .addMapping("test", TEST_MAPPING) - .get() - ); - assertThat(exc.getMessage(), containsString("docvalues not found for index sort field:[numeric]")); - - exc = expectThrows(IllegalArgumentException.class, - () -> prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .putArray("index.sort.field", "keyword") - ) - .addMapping("test", TEST_MAPPING) - .get() - ); - assertThat(exc.getMessage(), containsString("docvalues not found for index sort field:[keyword]")); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java deleted file mode 100644 index af3fdf9adbc24..0000000000000 --- 
a/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; -import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -public class IndexSortSettingsTests extends ESTestCase { - private static IndexSettings indexSettings(Settings settings) { - return indexSettings(settings, null); - } - - private static IndexSettings indexSettings(Settings settings, Version version) { - final Settings newSettings; - if (version != null) { - newSettings = Settings.builder() - .put(settings) - .put(IndexMetaData.SETTING_VERSION_CREATED, version) - .build(); - } else { - newSettings = settings; - } - return 
new IndexSettings(newIndexMeta("test", newSettings), Settings.EMPTY); - } - - public void testNoIndexSort() throws IOException { - IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); - assertFalse(indexSettings.getIndexSortConfig().hasIndexSort()); - } - - public void testSimpleIndexSort() throws IOException { - Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .put("index.sort.order", "asc") - .put("index.sort.mode", "max") - .put("index.sort.missing", "_last") - .build(); - IndexSettings indexSettings = indexSettings(settings); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - assertThat(config.sortSpecs.length, equalTo(1)); - - assertThat(config.sortSpecs[0].field, equalTo("field1")); - assertThat(config.sortSpecs[0].order, equalTo(SortOrder.ASC)); - assertThat(config.sortSpecs[0].missingValue, equalTo("_last")); - assertThat(config.sortSpecs[0].mode, equalTo(MultiValueMode.MAX)); - } - - public void testIndexSortWithArrays() throws IOException { - Settings settings = Settings.builder() - .putArray("index.sort.field", "field1", "field2") - .putArray("index.sort.order", "asc", "desc") - .putArray("index.sort.missing", "_last", "_first") - .build(); - IndexSettings indexSettings = indexSettings(settings); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - assertThat(config.sortSpecs.length, equalTo(2)); - - assertThat(config.sortSpecs[0].field, equalTo("field1")); - assertThat(config.sortSpecs[1].field, equalTo("field2")); - assertThat(config.sortSpecs[0].order, equalTo(SortOrder.ASC)); - assertThat(config.sortSpecs[1].order, equalTo(SortOrder.DESC)); - assertThat(config.sortSpecs[0].missingValue, equalTo("_last")); - assertThat(config.sortSpecs[1].missingValue, equalTo("_first")); - assertNull(config.sortSpecs[0].mode); - assertNull(config.sortSpecs[1].mode); - } - - public void testInvalidIndexSort() throws IOException { - 
final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .put("index.sort.order", "asc, desc") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); - assertThat(exc.getMessage(), containsString("index.sort.field:[field1] index.sort.order:[asc, desc], size mismatch")); - } - - public void testInvalidIndexSortWithArray() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .putArray("index.sort.order", new String[] {"asc", "desc"}) - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); - assertThat(exc.getMessage(), - containsString("index.sort.field:[field1] index.sort.order:[asc, desc], size mismatch")); - } - - public void testInvalidOrder() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .put("index.sort.order", "invalid") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); - assertThat(exc.getMessage(), containsString("Illegal sort order:invalid")); - } - - public void testInvalidMode() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .put("index.sort.mode", "invalid") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); - assertThat(exc.getMessage(), containsString("Illegal sort mode: invalid")); - } - - public void testInvalidMissing() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .put("index.sort.missing", "default") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); - assertThat(exc.getMessage(), containsString("Illegal missing value:[default]," + - " 
must be one of [_last, _first]")); - } - - public void testInvalidVersion() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings, Version.V_5_4_0_UNRELEASED)); - assertThat(exc.getMessage(), - containsString("unsupported index.version.created:5.4.0, " + - "can't set index.sort on versions prior to 6.0.0-alpha1")); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index a5bdf5c39641c..71d754ddfb6ca 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -58,8 +58,6 @@ import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -262,7 +260,7 @@ public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, An config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), analyzer, config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), - config.getMaxUnsafeAutoIdTimestamp(), config.getIndexSort()); + config.getMaxUnsafeAutoIdTimestamp()); } @Override @@ -360,18 +358,7 @@ protected InternalEngine createEngine( MergePolicy mergePolicy, @Nullable IndexWriterFactory indexWriterFactory, @Nullable Supplier 
sequenceNumbersServiceSupplier) throws IOException { - return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, sequenceNumbersServiceSupplier, null); - } - - protected InternalEngine createEngine( - IndexSettings indexSettings, - Store store, - Path translogPath, - MergePolicy mergePolicy, - @Nullable IndexWriterFactory indexWriterFactory, - @Nullable Supplier sequenceNumbersServiceSupplier, - @Nullable Sort indexSort) throws IOException { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null, indexSort); + EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); InternalEngine internalEngine = createInternalEngine(indexWriterFactory, sequenceNumbersServiceSupplier, config); if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { internalEngine.recoverFromTranslog(); @@ -406,24 +393,12 @@ public SequenceNumbersService seqNoService() { public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener) { return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(), - maxUnsafeAutoIdTimestamp, refreshListener, null); - } - - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, - long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener, Sort indexSort) { - return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(), - maxUnsafeAutoIdTimestamp, refreshListener, indexSort); + maxUnsafeAutoIdTimestamp, refreshListener); } public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, SnapshotDeletionPolicy deletionPolicy, long maxUnsafeAutoIdTimestamp, 
ReferenceManager.RefreshListener refreshListener) { - return config(indexSettings, store, translogPath, mergePolicy, deletionPolicy, maxUnsafeAutoIdTimestamp, refreshListener, null); - } - - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, - SnapshotDeletionPolicy deletionPolicy, long maxUnsafeAutoIdTimestamp, - ReferenceManager.RefreshListener refreshListener, Sort indexSort) { IndexWriterConfig iwc = newIndexWriterConfig(); TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); final EngineConfig.OpenMode openMode; @@ -446,7 +421,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, new TranslogHandler(xContentRegistry(), shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListener, - maxUnsafeAutoIdTimestamp, indexSort); + maxUnsafeAutoIdTimestamp); return config; } @@ -661,37 +636,6 @@ public void testSegmentsWithMergeFlag() throws Exception { } } - public void testSegmentsWithIndexSort() throws Exception { - Sort indexSort = new Sort(new SortedSetSortField("_type", false)); - try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, - null, null, indexSort)) { - List segments = engine.segments(true); - assertThat(segments.isEmpty(), equalTo(true)); - - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - engine.index(indexForDoc(doc)); - engine.refresh("test"); - - segments = engine.segments(false); - assertThat(segments.size(), equalTo(1)); - assertThat(segments.get(0).getSegmentSort(), equalTo(indexSort)); - - ParsedDocument doc2 = testParsedDocument("2", "test", null, 
testDocumentWithTextField(), B_2, null); - engine.index(indexForDoc(doc2)); - engine.refresh("test"); - ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null); - engine.index(indexForDoc(doc3)); - engine.refresh("test"); - - segments = engine.segments(true); - assertThat(segments.size(), equalTo(3)); - assertThat(segments.get(0).getSegmentSort(), equalTo(indexSort)); - assertThat(segments.get(1).getSegmentSort(), equalTo(indexSort)); - assertThat(segments.get(2).getSegmentSort(), equalTo(indexSort)); - } - } - public void testSegmentsStatsIncludingFileSizes() throws Exception { try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { @@ -2736,7 +2680,7 @@ public void testRecoverFromForeignTranslog() throws IOException { config.getIndexSettings(), null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), config.getRefreshListeners(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); + TimeValue.timeValueMinutes(5), config.getRefreshListeners(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); @@ -2913,13 +2857,10 @@ public void testHandleDocumentFailure() throws Exception { } Engine.IndexResult indexResult = engine.index(indexForDoc(doc1)); assertNotNull(indexResult.getFailure()); - // document failures should be recorded in translog - assertNotNull(indexResult.getTranslogLocation()); + throwingIndexWriter.get().clearFailure(); indexResult = engine.index(indexForDoc(doc1)); assertNull(indexResult.getFailure()); - // document failures should be recorded in translog - 
assertNotNull(indexResult.getTranslogLocation()); engine.index(indexForDoc(doc2)); // test failure while deleting @@ -3731,9 +3672,12 @@ public long generateSeqNo() { final String reason = randomAlphaOfLength(16); noOpEngine.noOp( new Engine.NoOp( - maxSeqNo + 1, + null, + maxSeqNo + 1, primaryTerm, - randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY), + 0, + VersionType.INTERNAL, + randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY), System.nanoTime(), reason)); assertThat(noOpEngine.seqNoService().getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); diff --git a/core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java b/core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java deleted file mode 100644 index 9ee0a343b95e5..0000000000000 --- a/core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortedNumericSelector; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; -import org.apache.lucene.search.SortedSetSelector; -import org.apache.lucene.search.SortField; -import org.apache.lucene.util.Version; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Objects; - -public class SegmentTests extends ESTestCase { - static SortField randomSortField() { - if (randomBoolean()) { - SortedNumericSortField field = - new SortedNumericSortField(randomAlphaOfLengthBetween(1, 10), - SortField.Type.INT, - randomBoolean(), - randomBoolean() ? SortedNumericSelector.Type.MAX : SortedNumericSelector.Type.MIN); - if (randomBoolean()) { - field.setMissingValue(randomInt()); - } - return field; - } else { - SortedSetSortField field = - new SortedSetSortField(randomAlphaOfLengthBetween(1, 10), - randomBoolean(), - randomBoolean() ? SortedSetSelector.Type.MAX : SortedSetSelector.Type.MIN); - if (randomBoolean()) { - field.setMissingValue(randomBoolean() ? 
SortedSetSortField.STRING_FIRST : SortedSetSortField.STRING_LAST); - } - return field; - } - } - - static Sort randomIndexSort() { - if (randomBoolean()) { - return null; - } - int size = randomIntBetween(1, 5); - SortField[] fields = new SortField[size]; - for (int i = 0; i < size; i++) { - fields[i] = randomSortField(); - } - return new Sort(fields); - } - - static Segment randomSegment() { - Segment segment = new Segment(randomAlphaOfLength(10)); - segment.committed = randomBoolean(); - segment.search = randomBoolean(); - segment.sizeInBytes = randomNonNegativeLong(); - segment.docCount = randomIntBetween(1, Integer.MAX_VALUE); - segment.delDocCount = randomIntBetween(0, segment.docCount); - segment.version = Version.LUCENE_6_5_0; - segment.compound = randomBoolean(); - segment.mergeId = randomAlphaOfLengthBetween(1, 10); - segment.memoryInBytes = randomNonNegativeLong(); - segment.segmentSort = randomIndexSort(); - return segment; - } - - public void testSerialization() throws IOException { - for (int i = 0; i < 20; i++) { - Segment segment = randomSegment(); - BytesStreamOutput output = new BytesStreamOutput(); - segment.writeTo(output); - output.flush(); - StreamInput input = output.bytes().streamInput(); - Segment deserialized = new Segment(); - deserialized.readFrom(input); - assertTrue(isSegmentEquals(deserialized, segment)); - } - } - - static boolean isSegmentEquals(Segment seg1, Segment seg2) { - return seg1.docCount == seg2.docCount && - seg1.delDocCount == seg2.delDocCount && - seg1.committed == seg2.committed && - seg1.search == seg2.search && - Objects.equals(seg1.version, seg2.version) && - Objects.equals(seg1.compound, seg2.compound) && - seg1.sizeInBytes == seg2.sizeInBytes && - seg1.memoryInBytes == seg2.memoryInBytes && - seg1.getGeneration() == seg2.getGeneration() && - seg1.getName().equals(seg2.getName()) && - seg1.getMergeId().equals(seg2.getMergeId()) && - Objects.equals(seg1.segmentSort, seg2.segmentSort); - } -} diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 7141550a44fcd..0a6a8f8d46954 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -287,26 +287,4 @@ public void testPartitionedConstraints() { .put("index.routing_partition_size", 2)) .execute().actionGet().isAcknowledged()); } - - public void testIndexSortWithNestedFields() throws IOException { - Settings settings = Settings.builder() - .put("index.sort.field", "_type") - .build(); - IllegalArgumentException invalidNestedException = expectThrows(IllegalArgumentException.class, - () -> createIndex("test", settings, "t", "nested_field", "type=nested")); - assertThat(invalidNestedException.getMessage(), - containsString("cannot have nested fields when index sort is activated")); - IndexService indexService = createIndex("test", settings, "t"); - CompressedXContent nestedFieldMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() - .startObject("properties") - .startObject("nested_field") - .field("type", "nested") - .endObject() - .endObject().endObject().bytes()); - invalidNestedException = expectThrows(IllegalArgumentException.class, - () -> indexService.mapperService().merge("t", nestedFieldMapping, - MergeReason.MAPPING_UPDATE, true)); - assertThat(invalidNestedException.getMessage(), - containsString("cannot have nested fields when index sort is activated")); - } } diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 2243a5769b99a..2996362735f2d 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ 
b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -22,21 +22,21 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.bulk.BulkItemRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.bulk.BulkShardResponse; -import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkActionTests; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction.ReplicaResponse; -import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.support.replication.TransportWriteActionTestHelper; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -50,6 +50,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; @@ -57,7 +58,6 @@ import org.elasticsearch.index.shard.IndexShardTestCase; import 
org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; @@ -77,6 +77,8 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnPrimary; +import static org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnReplica; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -145,13 +147,9 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { public int indexDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", Integer.toString(docId.incrementAndGet())) - .source("{}", XContentType.JSON); - final BulkItemResponse response = index(indexRequest); - if (response.isFailed()) { - throw response.getFailure().getCause(); - } else { - assertEquals(DocWriteResponse.Result.CREATED, response.getResponse().getResult()); - } + .source("{}", XContentType.JSON); + final IndexResponse response = index(indexRequest); + assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); } primary.updateGlobalCheckpointOnPrimary(); return numOfDoc; @@ -160,29 +158,43 @@ public int indexDocs(final int numOfDoc) throws Exception { public int appendDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); - final BulkItemResponse response = index(indexRequest); - if (response.isFailed()) { - throw response.getFailure().getCause(); - } else if (response.isFailed() == false) { - assertEquals(DocWriteResponse.Result.CREATED, response.getResponse().getResult()); - } + final 
IndexResponse response = index(indexRequest); + assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); } primary.updateGlobalCheckpointOnPrimary(); return numOfDoc; } - public BulkItemResponse index(IndexRequest indexRequest) throws Exception { - PlainActionFuture listener = new PlainActionFuture<>(); + public IndexResponse index(IndexRequest indexRequest) throws Exception { + PlainActionFuture listener = new PlainActionFuture<>(); final ActionListener wrapBulkListener = ActionListener.wrap( - bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0]), + bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0].getResponse()), listener::onFailure); BulkItemRequest[] items = new BulkItemRequest[1]; - items[0] = new BulkItemRequest(0, indexRequest); + items[0] = new TestBulkItemRequest(0, indexRequest); BulkShardRequest request = new BulkShardRequest(shardId, indexRequest.getRefreshPolicy(), items); new IndexingAction(request, wrapBulkListener, this).execute(); return listener.get(); } + /** BulkItemRequest exposing get/set primary response */ + public class TestBulkItemRequest extends BulkItemRequest { + + TestBulkItemRequest(int id, DocWriteRequest request) { + super(id, request); + } + + @Override + protected void setPrimaryResponse(BulkItemResponse primaryResponse) { + super.setPrimaryResponse(primaryResponse); + } + + @Override + protected BulkItemResponse getPrimaryResponse() { + return super.getPrimaryResponse(); + } + } + public synchronized void startAll() throws IOException { startReplicas(replicas.size()); } @@ -430,7 +442,7 @@ protected Set getInSyncAllocationIds(ShardId shardId, ClusterState clust protected abstract PrimaryResult performOnPrimary(IndexShard primary, Request request) throws Exception; - protected abstract void performOnReplica(ReplicaRequest request, IndexShard replica) throws Exception; + protected abstract void performOnReplica(ReplicaRequest request, IndexShard replica) throws 
IOException; class PrimaryRef implements ReplicationOperation.Primary { @@ -527,53 +539,47 @@ class IndexingAction extends ReplicationAction result = executeShardBulkOnPrimary(primary, request); - return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful); + final IndexRequest indexRequest = (IndexRequest) request.items()[0].request(); + indexRequest.process(null, request.index()); + final IndexResponse indexResponse = indexOnPrimary(indexRequest, primary); + BulkItemResponse[] itemResponses = new BulkItemResponse[1]; + itemResponses[0] = new BulkItemResponse(0, indexRequest.opType(), indexResponse); + ((ReplicationGroup.TestBulkItemRequest) request.items()[0]).setPrimaryResponse(itemResponses[0]); + return new PrimaryResult(request, new BulkShardResponse(primary.shardId(), itemResponses)); } @Override - protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { - executeShardBulkOnReplica(replica, request); - } - } - - private TransportWriteAction.WritePrimaryResult executeShardBulkOnPrimary(IndexShard primary, BulkShardRequest request) throws Exception { - for (BulkItemRequest itemRequest : request.items()) { - if (itemRequest.request() instanceof IndexRequest) { - ((IndexRequest) itemRequest.request()).process(null, index.getName()); - } + protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws IOException { + final ReplicationGroup.TestBulkItemRequest bulkItemRequest = ((ReplicationGroup.TestBulkItemRequest) request.items()[0]); + final DocWriteResponse primaryResponse = bulkItemRequest.getPrimaryResponse().getResponse(); + indexOnReplica(primaryResponse, ((IndexRequest) bulkItemRequest.request()), replica); } - final TransportWriteAction.WritePrimaryResult result = - TransportShardBulkAction.performOnPrimary(request, primary, null, - System::currentTimeMillis, new TransportShardBulkActionTests.NoopMappingUpdatePerformer()); - 
request.primaryTerm(primary.getPrimaryTerm()); - TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.location, logger); - return result; - } - - private void executeShardBulkOnReplica(IndexShard replica, BulkShardRequest request) throws Exception { - final Translog.Location location = TransportShardBulkAction.performOnReplica(request, replica); - TransportWriteActionTestHelper.performPostWriteActions(replica, request, location, logger); } /** * indexes the given requests on the supplied primary, modifying it for replicas */ - BulkShardRequest indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception { - final BulkItemRequest bulkItemRequest = new BulkItemRequest(0, request); - BulkItemRequest[] bulkItemRequests = new BulkItemRequest[1]; - bulkItemRequests[0] = bulkItemRequest; - final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(), bulkItemRequests); - final TransportWriteAction.WritePrimaryResult result = - executeShardBulkOnPrimary(primary, bulkShardRequest); - return result.replicaRequest(); + protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception { + final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, + new TransportShardBulkActionTests.NoopMappingUpdatePerformer()); + request.primaryTerm(primary.getPrimaryTerm()); + TransportWriteActionTestHelper.performPostWriteActions(primary, request, indexResult.getTranslogLocation(), logger); + return new IndexResponse( + primary.shardId(), + request.type(), + request.id(), + indexResult.getSeqNo(), + primary.getPrimaryTerm(), + indexResult.getVersion(), + indexResult.isCreated()); } /** * indexes the given requests on the supplied replica shard */ - void indexOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { - executeShardBulkOnReplica(replica, request); + protected void indexOnReplica(DocWriteResponse response, IndexRequest 
request, IndexShard replica) throws IOException { + final Engine.IndexResult result = executeIndexRequestOnReplica(response, request, replica); + TransportWriteActionTestHelper.performPostWriteActions(replica, request, result.getTranslogLocation(), logger); } class GlobalCheckpointSync extends ReplicationAction future = shards.asyncRecoverReplica(replica, (indexShard, node) - -> new RecoveryTarget(indexShard, node, recoveryListener, version -> { - }) { + -> new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) { @Override public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { super.cleanFiles(totalTranslogOps, sourceMetaData); @@ -122,8 +113,8 @@ public void testInheritMaxValidAutoIDTimestampOnRecovery() throws Exception { shards.startAll(); final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); indexRequest.onRetry(); // force an update of the timestamp - final BulkItemResponse response = shards.index(indexRequest); - assertEquals(DocWriteResponse.Result.CREATED, response.getResponse().getResult()); + final IndexResponse response = shards.index(indexRequest); + assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); if (randomBoolean()) { // lets check if that also happens if no translog record is replicated shards.flush(); } @@ -156,7 +147,7 @@ public void testCheckpointsAdvance() throws Exception { final SeqNoStats shardStats = shard.seqNoStats(); final ShardRouting shardRouting = shard.routingEntry(); logger.debug("seq_no stats for {}: {}", shardRouting, XContentHelper.toString(shardStats, - new ToXContent.MapParams(Collections.singletonMap("pretty", "false")))); + new ToXContent.MapParams(Collections.singletonMap("pretty", "false")))); assertThat(shardRouting + " local checkpoint mismatch", shardStats.getLocalCheckpoint(), equalTo(numDocs - 1L)); assertThat(shardRouting + " global checkpoint mismatch", 
shardStats.getGlobalCheckpoint(), equalTo(numDocs - 1L)); @@ -167,7 +158,7 @@ public void testCheckpointsAdvance() throws Exception { public void testConflictingOpsOnReplica() throws Exception { Map mappings = - Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}"); + Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}"); try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(2, mappings))) { shards.startAll(); IndexShard replica1 = shards.getReplicas().get(0); @@ -189,128 +180,4 @@ public void testConflictingOpsOnReplica() throws Exception { } } } - - /** - * test document failures (failures after seq_no generation) are added as noop operation to the translog - * for primary and replica shards - */ - public void testDocumentFailureReplication() throws Exception { - final String failureMessage = "simulated document failure"; - final ThrowingDocumentFailureEngineFactory throwingDocumentFailureEngineFactory = - new ThrowingDocumentFailureEngineFactory(failureMessage); - try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(0)) { - @Override - protected EngineFactory getEngineFactory(ShardRouting routing) { - return throwingDocumentFailureEngineFactory; - }}) { - - // test only primary - shards.startPrimary(); - BulkItemResponse response = shards.index( - new IndexRequest(index.getName(), "testDocumentFailureReplication", "1") - .source("{}", XContentType.JSON) - ); - assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 1, failureMessage); - shards.assertAllEqual(0); - - // add some replicas - int nReplica = randomIntBetween(1, 3); - for (int i = 0; i < nReplica; i++) { - shards.addReplica(); - } - shards.startReplicas(nReplica); - response = shards.index( - new IndexRequest(index.getName(), "testDocumentFailureReplication", "1") - .source("{}", XContentType.JSON) - ); - 
assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 2, failureMessage); - shards.assertAllEqual(0); - } - } - - /** - * test request failures (failures before seq_no generation) are not added as a noop to translog - */ - public void testRequestFailureReplication() throws Exception { - try (ReplicationGroup shards = createGroup(0)) { - shards.startAll(); - BulkItemResponse response = shards.index( - new IndexRequest(index.getName(), "testRequestFailureException", "1") - .source("{}", XContentType.JSON) - .version(2) - ); - assertTrue(response.isFailed()); - assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); - shards.assertAllEqual(0); - for (IndexShard indexShard : shards) { - try(Translog.View view = indexShard.acquireTranslogView()) { - assertThat(view.totalOperations(), equalTo(0)); - } - } - - // add some replicas - int nReplica = randomIntBetween(1, 3); - for (int i = 0; i < nReplica; i++) { - shards.addReplica(); - } - shards.startReplicas(nReplica); - response = shards.index( - new IndexRequest(index.getName(), "testRequestFailureException", "1") - .source("{}", XContentType.JSON) - .version(2) - ); - assertTrue(response.isFailed()); - assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); - shards.assertAllEqual(0); - for (IndexShard indexShard : shards) { - try(Translog.View view = indexShard.acquireTranslogView()) { - assertThat(view.totalOperations(), equalTo(0)); - } - } - } - } - - /** Throws documentFailure on every indexing operation */ - static class ThrowingDocumentFailureEngineFactory implements EngineFactory { - final String documentFailureMessage; - - ThrowingDocumentFailureEngineFactory(String documentFailureMessage) { - this.documentFailureMessage = documentFailureMessage; - } - - @Override - public Engine newReadWriteEngine(EngineConfig config) { - return InternalEngineTests.createInternalEngine((directory, writerConfig) 
-> - new IndexWriter(directory, writerConfig) { - @Override - public long addDocument(Iterable doc) throws IOException { - assert documentFailureMessage != null; - throw new IOException(documentFailureMessage); - } - }, null, config); - } - } - - private static void assertNoOpTranslogOperationForDocumentFailure( - Iterable replicationGroup, - int expectedOperation, - String failureMessage) throws IOException { - for (IndexShard indexShard : replicationGroup) { - try(Translog.View view = indexShard.acquireTranslogView()) { - assertThat(view.totalOperations(), equalTo(expectedOperation)); - final Translog.Snapshot snapshot = view.snapshot(); - long expectedSeqNo = 0L; - Translog.Operation op = snapshot.next(); - do { - assertThat(op.opType(), equalTo(Translog.Operation.Type.NO_OP)); - assertThat(op.seqNo(), equalTo(expectedSeqNo)); - assertThat(((Translog.NoOp) op).reason(), containsString(failureMessage)); - op = snapshot.next(); - expectedSeqNo++; - } while (op != null); - } - } - } } diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 139c7f500d8d7..12f749e681918 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -24,9 +24,9 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.index.engine.Engine; @@ -168,8 +168,8 @@ 
public void testRecoveryAfterPrimaryPromotion() throws Exception { for (int i = 0; i < rollbackDocs; i++) { final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "rollback_" + i) .source("{}", XContentType.JSON); - final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); - indexOnReplica(bulkShardRequest, replica); + final IndexResponse primaryResponse = indexOnPrimary(indexRequest, oldPrimary); + indexOnReplica(primaryResponse, indexRequest, replica); } if (randomBoolean()) { oldPrimary.flush(new FlushRequest(index.getName())); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index e68ee0758fc24..fec0b766d3490 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -531,7 +531,7 @@ public static final IndexShard newIndexShard(IndexService indexService, IndexSh IndexingOperationListener... 
listeners) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), - shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), + shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexService.getThreadPool(), indexService.getBigArrays(), null, () -> {}, Collections.emptyList(), Arrays.asList(listeners)); return newShard; diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 3e5a34c3921fe..b7e20cf75c83c 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -123,7 +123,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, translogHandler, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); + TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); engine = new InternalEngine(config); listeners.setTranslog(engine.getTranslog()); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java index dc7d620a97b37..f31733dc47723 100644 --- 
a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java @@ -20,8 +20,6 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -29,10 +27,6 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -53,27 +47,13 @@ public class StoreRecoveryTests extends ESTestCase { public void testAddIndices() throws IOException { Directory[] dirs = new Directory[randomIntBetween(1, 10)]; final int numDocs = randomIntBetween(50, 100); - final Sort indexSort; - if (randomBoolean()) { - indexSort = new Sort(new SortedNumericSortField("num", SortField.Type.LONG, true)); - } else { - indexSort = null; - } int id = 0; for (int i = 0; i < dirs.length; i++) { dirs[i] = newFSDirectory(createTempDir()); - IndexWriterConfig iwc = newIndexWriterConfig() - .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(IndexWriterConfig.OpenMode.CREATE); - if (indexSort != null) { - iwc.setIndexSort(indexSort); - } - IndexWriter writer = new IndexWriter(dirs[i], iwc); + IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.CREATE)); for (int j = 0; j < numDocs; j++) { - writer.addDocument(Arrays.asList( - new 
StringField("id", Integer.toString(id++), Field.Store.YES), - new SortedNumericDocValuesField("num", randomLong()) - )); + writer.addDocument(Arrays.asList(new StringField("id", Integer.toString(id++), Field.Store.YES))); } writer.commit(); @@ -82,7 +62,7 @@ public void testAddIndices() throws IOException { StoreRecovery storeRecovery = new StoreRecovery(new ShardId("foo", "bar", 1), logger); RecoveryState.Index indexStats = new RecoveryState.Index(); Directory target = newFSDirectory(createTempDir()); - storeRecovery.addIndices(indexStats, target, indexSort, dirs); + storeRecovery.addIndices(indexStats, target, dirs); int numFiles = 0; Predicate filesFilter = (f) -> f.startsWith("segments") == false && f.equals("write.lock") == false && f.startsWith("extra") == false; @@ -100,11 +80,7 @@ public void testAddIndices() throws IOException { DirectoryReader reader = DirectoryReader.open(target); SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target); for (SegmentCommitInfo info : segmentCommitInfos) { // check that we didn't merge - assertEquals("all sources must be flush", - info.info.getDiagnostics().get("source"), "flush"); - if (indexSort != null) { - assertEquals(indexSort, info.info.getIndexSort()); - } + assertEquals("all sources must be flush", info.info.getDiagnostics().get("source"), "flush"); } assertEquals(reader.numDeletedDocs(), 0); assertEquals(reader.numDocs(), id); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index eb7985002a324..2b8f7515ae7a6 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2247,11 +2247,11 @@ public void testSimpleCommit() throws IOException { final long generation = randomIntBetween(1, Math.toIntExact(translog.currentFileGeneration())); translog.commit(generation); - for (long g = 0; g < 
generation; g++) { - assertFileDeleted(translog, g); + for (long i = 0; i < generation; i++) { + assertFileDeleted(translog, i); } - for (long g = generation; g <= translog.currentFileGeneration(); g++) { - assertFileIsPresent(translog, g); + for (long i = generation; i <= translog.currentFileGeneration(); i++) { + assertFileIsPresent(translog, i); } } @@ -2271,10 +2271,10 @@ public void testPrepareCommitAndCommit() throws IOException { final int committedGeneration = randomIntBetween(Math.max(1, Math.toIntExact(last)), Math.toIntExact(generation)); translog.commit(committedGeneration); last = committedGeneration; - for (long g = 0; g < committedGeneration; g++) { + for (long g = 0; i < generation; g++) { assertFileDeleted(translog, g); } - for (long g = committedGeneration; g <= translog.currentFileGeneration(); g++) { + for (long g = generation; g < translog.currentFileGeneration(); g++) { assertFileIsPresent(translog, g); } } @@ -2302,7 +2302,7 @@ public void testCommitWithOpenView() throws IOException { } // the view generation could be -1 if no commit has been performed final long max = Math.max(1, Math.min(lastCommittedGeneration, viewGeneration)); - for (long g = max; g <= translog.currentFileGeneration(); g++) { + for (long g = max; g < translog.currentFileGeneration(); g++) { assertFileIsPresent(translog, g); } } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index f3bd58fd38a1c..cf22c95ac6997 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -151,7 +151,6 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th when(indexService.mapperService()).thenReturn(mapperService); when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList()); 
when(indexService.getIndexEventListener()).thenReturn(new IndexEventListener() {}); - when(indexService.getIndexSortSupplier()).thenReturn(() -> null); return indexService; }); } catch (IOException e) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java deleted file mode 100644 index 0cfa07538e424..0000000000000 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.aggregations.metrics.percentiles; - -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregationTestCase; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.junit.Before; - -import java.util.List; -import java.util.Map; - -public abstract class InternalPercentilesTestCase extends InternalAggregationTestCase { - - private double[] percents; - - @Before - public void init() { - percents = randomPercents(); - } - - @Override - protected T createTestInstance(String name, List pipelineAggregators, Map metaData) { - int numValues = randomInt(100); - double[] values = new double[numValues]; - for (int i = 0; i < numValues; ++i) { - values[i] = randomDouble(); - } - return createTestInstance(name, pipelineAggregators, metaData, randomBoolean(), DocValueFormat.RAW, percents, values); - } - - protected abstract T createTestInstance(String name, List pipelineAggregators, Map metaData, - boolean keyed, DocValueFormat format, double[] percents, double[] values); - - private static double[] randomPercents() { - List randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d); - double[] percents = new double[randomCdfValues.size()]; - for (int i = 0; i < randomCdfValues.size(); i++) { - percents[i] = randomCdfValues.get(i); - } - return percents; - } -} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java deleted file mode 100644 index bff026d5cf4b0..0000000000000 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under 
one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; - -import org.HdrHistogram.DoubleHistogram; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; - -import java.util.Arrays; -import java.util.List; -import java.util.Map; - -public class InternalHDRPercentilesTests extends InternalPercentilesTestCase { - - @Override - protected InternalHDRPercentiles createTestInstance(String name, - List pipelineAggregators, - Map metaData, - boolean keyed, DocValueFormat format, double[] percents, double[] values) { - - final DoubleHistogram state = new DoubleHistogram(3); - Arrays.stream(values).forEach(state::recordValue); - - return new InternalHDRPercentiles(name, percents, state, keyed, format, pipelineAggregators, metaData); - } - - @Override - protected void assertReduced(InternalHDRPercentiles reduced, List inputs) { - // it is hard to check the values due to the inaccuracy of the algorithm - long totalCount = 0; - for (InternalHDRPercentiles ranks : inputs) { - totalCount += 
ranks.state.getTotalCount(); - } - assertEquals(totalCount, reduced.state.getTotalCount()); - } - - @Override - protected Writeable.Reader instanceReader() { - return InternalHDRPercentiles::new; - } -} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java index f2db4a48530ed..75efa516409ae 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java @@ -21,24 +21,29 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase; +import org.elasticsearch.search.aggregations.InternalAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import java.util.Arrays; import java.util.List; import java.util.Map; -public class InternalTDigestPercentilesTests extends InternalPercentilesTestCase { +public class InternalTDigestPercentilesTests extends InternalAggregationTestCase { + + private final double[] percents = randomPercents(); @Override protected InternalTDigestPercentiles createTestInstance(String name, List pipelineAggregators, - Map metaData, - boolean keyed, DocValueFormat format, double[] percents, double[] values) { - final TDigestState state = new TDigestState(100); - Arrays.stream(values).forEach(state::add); + Map metaData) { + boolean keyed = randomBoolean(); + DocValueFormat format = DocValueFormat.RAW; + TDigestState state = new TDigestState(100); - assertEquals(state.centroidCount(), values.length); + int numValues = randomInt(10); + for (int i = 0; i < numValues; ++i) { + 
state.add(randomDouble() * 100); + } + assertEquals(state.centroidCount(), numValues); return new InternalTDigestPercentiles(name, percents, state, keyed, format, pipelineAggregators, metaData); } @@ -64,4 +69,13 @@ protected void assertReduced(InternalTDigestPercentiles reduced, List instanceReader() { return InternalTDigestPercentiles::new; } + + private static double[] randomPercents() { + List randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d); + double[] percents = new double[randomCdfValues.size()]; + for (int i = 0; i < randomCdfValues.size(); i++) { + percents[i] = randomCdfValues.get(i); + } + return percents; + } } diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index 0362f2a6fe3a3..f69df7f51713f 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -169,7 +169,7 @@ Before starting, you need to have: * A http://www.windowsazure.com/[Windows Azure account] * OpenSSL that isn't from MacPorts, specifically `OpenSSL 1.0.1f 6 Jan 2014` doesn't seem to create a valid keypair for ssh. FWIW, - `OpenSSL 1.0.1c 10 May 2012` on Ubuntu 14.04 LTS is known to work. + `OpenSSL 1.0.1c 10 May 2012` on Ubuntu 12.04 LTS is known to work. * SSH keys and certificate + -- diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc deleted file mode 100644 index 0c2b5c9abe979..0000000000000 --- a/docs/reference/index-modules/index-sorting.asciidoc +++ /dev/null @@ -1,107 +0,0 @@ -[[index-modules-index-sorting]] -== Index Sorting - -experimental[] - -When creating a new index in elasticsearch it is possible to configure how the Segments -inside each Shard will be sorted. By default Lucene does not apply any sort. -The `index.sort.*` settings define which fields should be used to sort the documents inside each Segment. 
- -[WARNING] -nested fields are not compatible with index sorting because they rely on the assumption -that nested documents are stored in contiguous doc ids, which can be broken by index sorting. -An error will be thrown if index sorting is activated on an index that contains nested fields. - -For instance the following example shows how to define a sort on a single field: - -[source,js] --------------------------------------------------- -PUT twitter -{ - "settings" : { - "index" : { - "sort.field" : "date", <1> - "sort.order" : "desc" <2> - } - }, - "mappings": { - "tweet": { - "properties": { - "date": { - "type": "date" - } - } - } - } -} --------------------------------------------------- -// CONSOLE - -<1> This index is sorted by the `date` field -<2> ... in descending order. - -It is also possible to sort the index by more than one field: - -[source,js] --------------------------------------------------- -PUT twitter -{ - "settings" : { - "index" : { - "sort.field" : ["username", "date"], <1> - "sort.order" : ["asc", "desc"] <2> - } - }, - "mappings": { - "tweet": { - "properties": { - "username": { - "type": "keyword", - "doc_values": true - }, - "date": { - "type": "date" - } - } - } - } -} --------------------------------------------------- -// CONSOLE - -<1> This index is sorted by `username` first then by `date` -<2> ... in ascending order for the `username` field and in descending order for the `date` field. - - -Index sorting supports the following settings: - -`index.sort.field`:: - - The list of fields used to sort the index. - Only `boolean`, `numeric`, `date` and `keyword` fields with `doc_values` are allowed here. - -`index.sort.order`:: - - The sort order to use for each field. - The order option can have the following values: - * `asc`: For ascending order - * `desc`: For descending order. - -`index.sort.mode`:: - - Elasticsearch supports sorting by multi-valued fields. - The mode option controls what value is picked to sort the document. 
- The mode option can have the following values: - * `min`: Pick the lowest value. - * `max`: Pick the highest value. - -`index.sort.missing`:: - - The missing parameter specifies how docs which are missing the field should be treated. - The missing value can have the following values: - * `_last`: Documents without value for the field are sorted last. - * `_first`: Documents without value for the field are sorted first. - -[WARNING] -Index sorting can be defined only once at index creation. It is not allowed to add or update -a sort on an existing index. diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index f195ee1f2fd75..9f919a2802ded 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1511,72 +1511,15 @@ Converts a JSON string into a structured JSON object. | `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. 
|====== -Suppose you provide this configuration of the `json` processor: - -[source,js] --------------------------------------------------- -{ - "json" : { - "field" : "string_source", - "target_field" : "json_target" - } -} --------------------------------------------------- - -If the following document is processed: - -[source,js] --------------------------------------------------- -{ - "string_source": "{\"foo\": 2000}" -} --------------------------------------------------- - -after the `json` processor operates on it, it will look like: - [source,js] -------------------------------------------------- { - "string_source": "{\"foo\": 2000}", - "json_target": { - "foo": 2000 + "json": { + "field": "{\"foo\": 2000}" } } -------------------------------------------------- -If the following configuration is provided, omitting the optional `target_field` setting: -[source,js] --------------------------------------------------- -{ - "json" : { - "field" : "source_and_target" - } -} --------------------------------------------------- - -then after the `json` processor operates on this document: - -[source,js] --------------------------------------------------- -{ - "source_and_target": "{\"foo\": 2000}" -} --------------------------------------------------- - -it will look like: - -[source,js] --------------------------------------------------- -{ - "source_and_target": { - "foo": 2000 - } -} --------------------------------------------------- - -This illustrates that, unless it is explicitly named in the processor configuration, the `target_field` -is the same field provided in the required `field` configuration. - [[kv-processor]] === KV Processor This processor helps automatically parse messages (or specific event fields) which are of the foo=bar variety. 
diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index 45839ac55d950..815606fb7bd3d 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -6,7 +6,7 @@ contains any value other than `null`. This field is used by the <> query to find documents that either have or don't have any non-+null+ value for a particular field. -The value of the `_field_names` field is accessible in queries: +The value of the `_field_name` field is accessible in queries: [source,js] -------------------------- @@ -34,4 +34,4 @@ GET my_index/_search -------------------------- // CONSOLE -<1> Querying on the `_field_names` field (also see the <> query) +<1> Querying on the `_field_names` field (also see the <> query) \ No newline at end of file diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 95923070e924b..0a41d0f465354 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -235,7 +235,8 @@ For example, bind-mounting a `custom_elasticsearch.yml` with `docker run` can be -------------------------------------------- -v full_path_to/custom_elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -------------------------------------------- -IMPORTANT: The container **runs Elasticsearch as user `elasticsearch` using uid:gid `1000:1000`**. Bind mounted host directories and files, such as `custom_elasticsearch.yml` above, **need to be accessible by this user**. For the https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#path-settings[data and log dirs], such as `/usr/share/elasticsearch/data`, write access is required as well. + +IMPORTANT: `custom_elasticsearch.yml` should be readable by uid:gid `1000:1000` ===== C. 
Customized image In some environments, it may make more sense to prepare a custom image containing your configuration. A `Dockerfile` to achieve this may be as simple as: @@ -273,8 +274,6 @@ We have collected a number of best practices for production use. NOTE: Any Docker parameters mentioned below assume the use of `docker run`. -. Elasticsearch inside the container runs as user `elasticsearch` using uid:gid `1000:1000`. If you are bind mounting a local directory or file, ensure it is readable by this user while the https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#path-settings[data and log dirs] additionally require write access. - . It is important to correctly set capabilities and ulimits via the Docker CLI. As seen earlier in the example <>, the following options are required: + --cap-add=IPC_LOCK --ulimit memlock=-1:-1 --ulimit nofile=65536:65536 diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java index e723081e36c0c..378cca7f58fb2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java @@ -43,8 +43,7 @@ public final class CompilerSettings { public static final String PICKY = "picky"; /** - * Hack to set the initial "depth" for the {@link DefBootstrap.PIC} and {@link DefBootstrap.MIC}. Only used for testing: do not - * overwrite. + * For testing: do not use. 
*/ public static final String INITIAL_CALL_SITE_DEPTH = "initialCallSiteDepth"; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java index 5fc41c8c63038..69b40f141e2a2 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java @@ -77,8 +77,8 @@ private void arrayLoadStoreTestCase(boolean declareAsDef, String valueType, Obje } private void expectOutOfBounds(int index, String script, Object val) { - IndexOutOfBoundsException e = expectScriptThrows(IndexOutOfBoundsException.class, () -> - exec(script, singletonMap("val", val), true)); + IndexOutOfBoundsException e = expectScriptThrows(IndexOutOfBoundsException.class, + () -> exec(script, singletonMap("val", val), true)); try { assertThat(e.getMessage(), outOfBoundsExceptionMessageMatcher(index, 5)); } catch (AssertionError ae) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java index 97e1f01fdfc94..ef2ddad5452d0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java @@ -186,7 +186,7 @@ public void testNullSafeDeref() { assertNull( exec("def a = null; return a?.toString()")); assertEquals("foo", exec("def a = 'foo'; return a?.toString()")); // Call with primitive result - assertMustBeNullable( "String a = null; return a?.length()"); + assertMustBeNullable( "String a = null; return a?.length()"); assertMustBeNullable( "String a = 'foo'; return a?.length()"); assertNull( exec("def a = null; return a?.length()")); assertEquals(3, exec("def a = 'foo'; return 
a?.length()")); @@ -265,7 +265,7 @@ public void testNullSafeDeref() { } private void assertMustBeNullable(String script) { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> exec(script)); + Exception e = expectScriptThrows(IllegalArgumentException.class , () -> exec(script)); assertEquals("Result of null safe operator must be nullable", e.getMessage()); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java index c3861add319dd..fe95e8c8c2316 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java @@ -325,7 +325,7 @@ public interface NoArgumentsConstant { Object execute(String foo); } public void testNoArgumentsConstant() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(NoArgumentsConstant.class, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + "names of the method arguments but [" + NoArgumentsConstant.class.getName() + "] doesn't have one.")); @@ -336,7 +336,7 @@ public interface WrongArgumentsConstant { Object execute(String foo); } public void testWrongArgumentsConstant() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(WrongArgumentsConstant.class, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + "names of the method arguments but [" + WrongArgumentsConstant.class.getName() + "] 
doesn't have one.")); @@ -347,7 +347,7 @@ public interface WrongLengthOfArgumentConstant { Object execute(String foo); } public void testWrongLengthOfArgumentConstant() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(WrongLengthOfArgumentConstant.class, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith("[" + WrongLengthOfArgumentConstant.class.getName() + "#ARGUMENTS] has length [2] but [" + WrongLengthOfArgumentConstant.class.getName() + "#execute] takes [1] argument.")); @@ -358,7 +358,7 @@ public interface UnknownArgType { Object execute(UnknownArgType foo); } public void testUnknownArgType() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(UnknownArgType.class, null, "1", emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgType.class.getName() + ". 
Painless interfaces can only accept arguments " + "that are of whitelisted types.", e.getMessage()); @@ -369,7 +369,7 @@ public interface UnknownReturnType { UnknownReturnType execute(String foo); } public void testUnknownReturnType() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(UnknownReturnType.class, null, "1", emptyMap())); assertEquals("Painless can only implement execute methods returning a whitelisted type but [" + UnknownReturnType.class.getName() + "#execute] returns [" + UnknownReturnType.class.getName() + "] which isn't whitelisted.", e.getMessage()); @@ -380,7 +380,7 @@ public interface UnknownArgTypeInArray { Object execute(UnknownArgTypeInArray[] foo); } public void testUnknownArgTypeInArray() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(UnknownArgTypeInArray.class, null, "1", emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgTypeInArray.class.getName() + ". 
Painless interfaces can only accept " + "arguments that are of whitelisted types.", e.getMessage()); @@ -391,7 +391,7 @@ public interface TwoExecuteMethods { Object execute(boolean foo); } public void testTwoExecuteMethods() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(TwoExecuteMethods.class, null, "null", emptyMap())); assertEquals("Painless can only implement interfaces that have a single method named [execute] but [" + TwoExecuteMethods.class.getName() + "] has more than one.", e.getMessage()); @@ -401,7 +401,7 @@ public interface BadMethod { Object something(); } public void testBadMethod() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(BadMethod.class, null, "null", emptyMap())); assertEquals("Painless can only implement methods named [execute] and [uses$argName] but [" + BadMethod.class.getName() + "] contains a method named [something]", e.getMessage()); @@ -413,7 +413,7 @@ public interface BadUsesReturn { Object uses$foo(); } public void testBadUsesReturn() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(BadUsesReturn.class, null, "null", emptyMap())); assertEquals("Painless can only implement uses$ methods that return boolean but [" + BadUsesReturn.class.getName() + "#uses$foo] returns [java.lang.Object].", e.getMessage()); @@ -425,7 +425,7 @@ public interface BadUsesParameter { boolean uses$bar(boolean foo); } public void testBadUsesParameter() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(BadUsesParameter.class, null, "null", emptyMap())); 
assertEquals("Painless can only implement uses$ methods that do not take parameters but [" + BadUsesParameter.class.getName() + "#uses$bar] does.", e.getMessage()); @@ -437,7 +437,7 @@ public interface BadUsesName { boolean uses$baz(); } public void testBadUsesName() { - Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(BadUsesName.class, null, "null", emptyMap())); assertEquals("Painless can only implement uses$ methods that match a parameter name but [" + BadUsesName.class.getName() + "#uses$baz] doesn't match any of [foo, bar].", e.getMessage()); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java index bcb92a527d9e6..bce70a080dbe6 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java @@ -204,7 +204,7 @@ public void testNestedCaptureParams() { public void testWrongArity() { assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9); - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { exec("Optional.empty().orElseGet(x -> x);"); }); assertTrue(expected.getMessage().contains("Incorrect number of parameters")); @@ -220,7 +220,7 @@ public void testWrongArityDef() { public void testWrongArityNotEnough() { assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9); - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { exec("List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(() -> 
5).sum();"); }); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java index 92ff9ef3c9334..83a592b3f2632 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java @@ -26,8 +26,10 @@ import java.util.Arrays; import java.util.HashSet; import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; public class RegexTests extends ScriptTestCase { @Override @@ -262,9 +264,8 @@ public void testBadRegexPattern() { assertEquals("Error compiling regex: Illegal Unicode escape sequence", e.getCause().getMessage()); // And make sure the location of the error points to the offset inside the pattern - assertScriptStack(e, - "/\\ujjjj/", - " ^---- HERE"); + assertEquals("/\\ujjjj/", e.getScriptStack().get(0)); + assertEquals(" ^---- HERE", e.getScriptStack().get(1)); } public void testRegexAgainstNumber() { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 1ab5aa14508ce..74c6c9a5628f0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -35,8 +35,6 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.Matchers.hasSize; - /** * Base test case for scripting unit tests. *

@@ -116,29 +114,10 @@ public void assertBytecodeHasPattern(String script, String pattern) { /** Checks a specific exception class is thrown (boxed inside ScriptException) and returns it. */ public static T expectScriptThrows(Class expectedType, ThrowingRunnable runnable) { - return expectScriptThrows(expectedType, true, runnable); - } - - /** Checks a specific exception class is thrown (boxed inside ScriptException) and returns it. */ - public static T expectScriptThrows(Class expectedType, boolean shouldHaveScriptStack, - ThrowingRunnable runnable) { try { runnable.run(); } catch (Throwable e) { if (e instanceof ScriptException) { - boolean hasEmptyScriptStack = ((ScriptException) e).getScriptStack().isEmpty(); - if (shouldHaveScriptStack && hasEmptyScriptStack) { - if (0 != e.getCause().getStackTrace().length) { - // Without -XX:-OmitStackTraceInFastThrow the jvm can eat the stack trace which causes us to ignore script_stack - AssertionFailedError assertion = new AssertionFailedError("ScriptException should have a scriptStack"); - assertion.initCause(e); - throw assertion; - } - } else if (false == shouldHaveScriptStack && false == hasEmptyScriptStack) { - AssertionFailedError assertion = new AssertionFailedError("ScriptException shouldn't have a scriptStack"); - assertion.initCause(e); - throw assertion; - } e = e.getCause(); if (expectedType.isInstance(e)) { return expectedType.cast(e); @@ -155,21 +134,4 @@ public static T expectScriptThrows(Class expectedType, } throw new AssertionFailedError("Expected exception " + expectedType.getSimpleName()); } - - /** - * Asserts that the script_stack looks right. - */ - public static void assertScriptStack(ScriptException e, String... 
stack) { - // This particular incantation of assertions makes the error messages more useful - try { - assertThat(e.getScriptStack(), hasSize(stack.length)); - for (int i = 0; i < stack.length; i++) { - assertEquals(stack[i], e.getScriptStack().get(i)); - } - } catch (AssertionError assertion) { - assertion.initCause(e); - throw assertion; - } - } - } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java index 2888eca3db4fa..da4558a693a0d 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java @@ -165,12 +165,12 @@ public void testStringAndCharacter() { assertEquals('c', exec("String s = \"c\"; (char)s")); assertEquals('c', exec("String s = 'c'; (char)s")); - ClassCastException expected = expectScriptThrows(ClassCastException.class, false, () -> { + ClassCastException expected = expectScriptThrows(ClassCastException.class, () -> { assertEquals("cc", exec("return (String)(char)\"cc\"")); }); assertTrue(expected.getMessage().contains("Cannot cast [String] with length greater than one to [char].")); - expected = expectScriptThrows(ClassCastException.class, false, () -> { + expected = expectScriptThrows(ClassCastException.class, () -> { assertEquals("cc", exec("return (String)(char)'cc'")); }); assertTrue(expected.getMessage().contains("Cannot cast [String] with length greater than one to [char].")); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index d60da7b795fbc..aaa337ae821ba 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ 
-19,10 +19,7 @@ package org.elasticsearch.painless; -import junit.framework.AssertionFailedError; - import org.apache.lucene.util.Constants; -import org.elasticsearch.script.ScriptException; import java.lang.invoke.WrongMethodTypeException; import java.util.Arrays; @@ -30,93 +27,52 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.instanceOf; public class WhenThingsGoWrongTests extends ScriptTestCase { public void testNullPointer() { expectScriptThrows(NullPointerException.class, () -> { exec("int x = params['missing']; return x;"); }); - expectScriptThrows(NullPointerException.class, () -> { - exec("Double.parseDouble(params['missing'])"); - }); } - /** - * Test that the scriptStack looks good. By implication this tests that we build proper "line numbers" in stack trace. These line - * numbers are really 1 based character numbers. - */ - public void testScriptStack() { - for (String type : new String[] {"String", "def "}) { - // trigger NPE at line 1 of the script - ScriptException exception = expectThrows(ScriptException.class, () -> { - exec(type + " x = null; boolean y = x.isEmpty();\n" + - "return y;"); - }); - // null deref at x.isEmpty(), the '.' is offset 30 - assertScriptElementColumn(30, exception); - assertScriptStack(exception, - "y = x.isEmpty();\n", - " ^---- HERE"); - assertThat(exception.getCause(), instanceOf(NullPointerException.class)); - - // trigger NPE at line 2 of the script - exception = expectThrows(ScriptException.class, () -> { - exec(type + " x = null;\n" + - "return x.isEmpty();"); - }); - // null deref at x.isEmpty(), the '.' 
is offset 25 - assertScriptElementColumn(25, exception); - assertScriptStack(exception, - "return x.isEmpty();", - " ^---- HERE"); - assertThat(exception.getCause(), instanceOf(NullPointerException.class)); - - // trigger NPE at line 3 of the script - exception = expectThrows(ScriptException.class, () -> { - exec(type + " x = null;\n" + - type + " y = x;\n" + - "return y.isEmpty();"); - }); - // null deref at y.isEmpty(), the '.' is offset 39 - assertScriptElementColumn(39, exception); - assertScriptStack(exception, - "return y.isEmpty();", - " ^---- HERE"); - assertThat(exception.getCause(), instanceOf(NullPointerException.class)); - - // trigger NPE at line 4 in script (inside conditional) - exception = expectThrows(ScriptException.class, () -> { - exec(type + " x = null;\n" + - "boolean y = false;\n" + - "if (!y) {\n" + - " y = x.isEmpty();\n" + - "}\n" + - "return y;"); - }); - // null deref at x.isEmpty(), the '.' is offset 53 - assertScriptElementColumn(53, exception); - assertScriptStack(exception, - "y = x.isEmpty();\n}\n", - " ^---- HERE"); - assertThat(exception.getCause(), instanceOf(NullPointerException.class)); - } - } + /** test "line numbers" in the bytecode, which are really 1-based offsets */ + public void testLineNumbers() { + // trigger NPE at line 1 of the script + NullPointerException exception = expectScriptThrows(NullPointerException.class, () -> { + exec("String x = null; boolean y = x.isEmpty();\n" + + "return y;"); + }); + // null deref at x.isEmpty(), the '.' 
is offset 30 (+1) + assertEquals(30 + 1, exception.getStackTrace()[0].getLineNumber()); - private void assertScriptElementColumn(int expectedColumn, ScriptException exception) { - StackTraceElement[] stackTrace = exception.getCause().getStackTrace(); - for (int i = 0; i < stackTrace.length; i++) { - if (WriterConstants.CLASS_NAME.equals(stackTrace[i].getClassName())) { - if (expectedColumn + 1 != stackTrace[i].getLineNumber()) { - AssertionFailedError assertion = new AssertionFailedError("Expected column to be [" + expectedColumn + "] but was [" - + stackTrace[i].getLineNumber() + "]"); - assertion.initCause(exception); - throw assertion; - } - return; - } - } - fail("didn't find script stack element"); + // trigger NPE at line 2 of the script + exception = expectScriptThrows(NullPointerException.class, () -> { + exec("String x = null;\n" + + "return x.isEmpty();"); + }); + // null deref at x.isEmpty(), the '.' is offset 25 (+1) + assertEquals(25 + 1, exception.getStackTrace()[0].getLineNumber()); + + // trigger NPE at line 3 of the script + exception = expectScriptThrows(NullPointerException.class, () -> { + exec("String x = null;\n" + + "String y = x;\n" + + "return y.isEmpty();"); + }); + // null deref at y.isEmpty(), the '.' is offset 39 (+1) + assertEquals(39 + 1, exception.getStackTrace()[0].getLineNumber()); + + // trigger NPE at line 4 in script (inside conditional) + exception = expectScriptThrows(NullPointerException.class, () -> { + exec("String x = null;\n" + + "boolean y = false;\n" + + "if (!y) {\n" + + " y = x.isEmpty();\n" + + "}\n" + + "return y;"); + }); + // null deref at x.isEmpty(), the '.' 
is offset 53 (+1) + assertEquals(53 + 1, exception.getStackTrace()[0].getLineNumber()); } public void testInvalidShift() { @@ -205,7 +161,7 @@ public void testSourceLimits() { final char[] tooManyChars = new char[Compiler.MAXIMUM_SOURCE_LENGTH + 1]; Arrays.fill(tooManyChars, '0'); - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { exec(new String(tooManyChars)); }); assertTrue(expected.getMessage().contains("Scripts may be no longer than")); @@ -326,4 +282,5 @@ public void testRegularUnexpectedCharacter() { e = expectScriptThrows(IllegalArgumentException.class, () -> exec("'cat", false)); assertEquals("unexpected character ['cat].", e.getMessage()); } + } diff --git a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 6ef40a7778236..f0be775306725 100644 --- a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -41,7 +41,6 @@ import java.util.Map; import java.util.stream.Collectors; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.anyOf; @@ -77,7 +76,7 @@ private int indexDocs(String index, final int idStart, final int numDocs) throws for (int i = 0; i < numDocs; i++) { final int id = idStart + i; assertOK(client().performRequest("PUT", index + "/test/" + id, emptyMap(), - new StringEntity("{\"test\": \"test_" + randomAsciiOfLength(2) + "\"}", ContentType.APPLICATION_JSON))); + new StringEntity("{\"test\": \"test_" + id + "\"}", ContentType.APPLICATION_JSON))); } return numDocs; } @@ -117,7 +116,7 @@ public void testIndexVersionPropagation() 
throws Exception { .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) .put("index.routing.allocation.include._name", bwcNames); - final String index = "indexversionprop"; + final String index = "test"; final int minUpdates = 5; final int maxUpdates = 10; createIndex(index, settings.build()); @@ -131,9 +130,7 @@ public void testIndexVersionPropagation() throws Exception { updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(); assertOK(client().performRequest("POST", index + "/_refresh")); - List shards = buildShards(index, nodes, newNodeClient); - Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); - logger.info("primary resolved to: " + primary.getNode().getNodeName()); + List shards = buildShards(nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 1, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc1); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 1); @@ -143,15 +140,13 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing docs with [{}] concurrent updates after allowing shards on all nodes", nUpdates); final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(index, nodes, newNodeClient); - primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); - logger.info("primary resolved to: " + primary.getNode().getNodeName()); + shards = buildShards(nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 2, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc2); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 2); } - primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); + Shard primary = buildShards(nodes, 
newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); ensureGreen(); @@ -159,7 +154,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing docs with [{}] concurrent updates after moving primary", nUpdates); final int finalVersionForDoc3 = indexDocWithConcurrentUpdates(index, 3, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(index, nodes, newNodeClient); + shards = buildShards(nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 3); @@ -172,7 +167,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 0", nUpdates); final int finalVersionForDoc4 = indexDocWithConcurrentUpdates(index, 4, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(index, nodes, newNodeClient); + shards = buildShards(nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 4); @@ -185,7 +180,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 1", nUpdates); final int finalVersionForDoc5 = indexDocWithConcurrentUpdates(index, 5, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(index, nodes, newNodeClient); + shards = buildShards(nodes, newNodeClient); for (Shard shard : shards) 
{ assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 5); @@ -221,7 +216,7 @@ public void testSeqNoCheckpoints() throws Exception { final int numberOfInitialDocs = 1 + randomInt(5); logger.info("indexing [{}] docs initially", numberOfInitialDocs); numDocs += indexDocs(index, 0, numberOfInitialDocs); - assertSeqNoOnShards(index, nodes, checkGlobalCheckpoints, 0, newNodeClient); + assertSeqNoOnShards(nodes, checkGlobalCheckpoints, 0, newNodeClient); logger.info("allowing shards on all nodes"); updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(); @@ -232,8 +227,8 @@ public void testSeqNoCheckpoints() throws Exception { final int numberOfDocsAfterAllowingShardsOnAllNodes = 1 + randomInt(5); logger.info("indexing [{}] docs after allowing shards on all nodes", numberOfDocsAfterAllowingShardsOnAllNodes); numDocs += indexDocs(index, numDocs, numberOfDocsAfterAllowingShardsOnAllNodes); - assertSeqNoOnShards(index, nodes, checkGlobalCheckpoints, 0, newNodeClient); - Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); + assertSeqNoOnShards(nodes, checkGlobalCheckpoints, 0, newNodeClient); + Shard primary = buildShards(nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); ensureGreen(); @@ -242,7 +237,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("indexing [{}] docs after moving primary", numberOfDocsAfterMovingPrimary); numDocsOnNewPrimary += indexDocs(index, numDocs, numberOfDocsAfterMovingPrimary); numDocs += numberOfDocsAfterMovingPrimary; - assertSeqNoOnShards(index, nodes, 
checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); + assertSeqNoOnShards(nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); /* * Dropping the number of replicas to zero, and then increasing it to one triggers a recovery thus exercising any BWC-logic in * the recovery code. @@ -260,7 +255,7 @@ public void testSeqNoCheckpoints() throws Exception { // the number of documents on the primary and on the recovered replica should match the number of indexed documents assertCount(index, "_primary", numDocs); assertCount(index, "_replica", numDocs); - assertSeqNoOnShards(index, nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); + assertSeqNoOnShards(nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); } } @@ -279,11 +274,10 @@ private void assertVersion(final String index, final int docId, final String pre assertThat("version mismatch for doc [" + docId + "] preference [" + preference + "]", actualVersion, equalTo(expectedVersion)); } - private void assertSeqNoOnShards(String index, Nodes nodes, boolean checkGlobalCheckpoints, int numDocs, RestClient client) - throws Exception { + private void assertSeqNoOnShards(Nodes nodes, boolean checkGlobalCheckpoints, int numDocs, RestClient client) throws Exception { assertBusy(() -> { try { - List shards = buildShards(index, nodes, client); + List shards = buildShards(nodes, client); Shard primaryShard = shards.stream().filter(Shard::isPrimary).findFirst().get(); assertNotNull("failed to find primary shard", primaryShard); final long expectedGlobalCkp; @@ -317,9 +311,9 @@ private void assertSeqNoOnShards(String index, Nodes nodes, boolean checkGlobalC }); } - private List buildShards(String index, Nodes nodes, RestClient client) throws IOException { - Response response = client.performRequest("GET", index + "/_stats", singletonMap("level", "shards")); - List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." 
+ index + ".shards.0"); + private List buildShards(Nodes nodes, RestClient client) throws IOException { + Response response = client.performRequest("GET", "test/_stats", singletonMap("level", "shards")); + List shardStats = ObjectPath.createFromResponse(response).evaluate("indices.test.shards.0"); ArrayList shards = new ArrayList<>(); for (Object shard : shardStats) { final String nodeId = ObjectPath.evaluate(shard, "routing.node"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml deleted file mode 100644 index 705c2d6f2cbe3..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml +++ /dev/null @@ -1,75 +0,0 @@ ---- -"Index Sort": - - - skip: - version: " - 5.99.99" - reason: this uses a new feature that has been added in 6.0.0 - - - do: - indices.create: - index: test - body: - settings: - number_of_shards: 1 - number_of_replicas: 1 - index.sort.field: rank - mappings: - t: - properties: - rank: - type: integer - - - do: - index: - index: test - type: test - id: "1" - body: { "rank": 4 } - - - do: - index: - index: test - type: test - id: "2" - body: { "rank": 1 } - - - do: - index: - index: test - type: test - id: "3" - body: { "rank": 3 } - - - do: - index: - index: test - type: test - id: "4" - body: { "rank": 2 } - - - do: - indices.refresh: - index: test - - - do: - indices.forcemerge: - index: test - max_num_segments: 1 - - - do: - indices.refresh: - index: test - - - do: - search: - index: test - type: test - body: - sort: _doc - - - match: {hits.total: 4 } - - length: {hits.hits: 4 } - - match: {hits.hits.0._id: "2" } - - match: {hits.hits.1._id: "4" } - - match: {hits.hits.2._id: "3" } - - match: {hits.hits.3._id: "1" } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 4062666ddbbf8..abd3f33b5f77b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -279,7 +279,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe }); IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, indicesFieldDataCache, new NoneCircuitBreakerService(), mapperService); - indexShard = new IndexShard(routing, indexSettings, shardPath, store, () ->null, indexCache, mapperService, similarityService, + indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, indexSearcherWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, globalCheckpointSyncer, Collections.emptyList(), Arrays.asList(listeners)); success = true; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 6d15a5e164ef5..db15ac0c33533 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -24,7 +24,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.HttpHost; -import org.apache.lucene.search.Sort; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -46,10 +45,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import 
org.elasticsearch.action.admin.indices.segments.IndexSegments; -import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; -import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -115,7 +111,6 @@ import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; @@ -2001,23 +1996,6 @@ public Set assertAllShardsOnNodes(String index, String... pattern) { return nodes; } - - /** - * Asserts that all segments are sorted with the provided {@link Sort}. - */ - public void assertSortedSegments(String indexName, Sort expectedIndexSort) { - IndicesSegmentResponse segmentResponse = - client().admin().indices().prepareSegments(indexName).execute().actionGet(); - IndexSegments indexSegments = segmentResponse.getIndices().get(indexName); - for (IndexShardSegments indexShardSegments : indexSegments.getShards().values()) { - for (ShardSegments shardSegments : indexShardSegments.getShards()) { - for (Segment segment : shardSegments) { - assertThat(expectedIndexSort, equalTo(segment.getSegmentSort())); - } - } - } - } - protected static class NumShards { public final int numPrimaries; public final int numReplicas; From e0059d0cbdc14449e1bd61430a19fc23c2f3d94a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 12:08:32 -0400 Subject: [PATCH 08/17] Revert "Remove primary term from result" This reverts commit d4bbfcde9713a234d2e82d6ac5dba97ab696adf9. 
--- .../action/bulk/TransportShardBulkAction.java | 8 ++-- .../elasticsearch/index/engine/Engine.java | 43 ++++++++++++------- .../index/engine/InternalEngine.java | 12 +++--- .../ESIndexLevelReplicationTestCase.java | 2 +- .../shard/IndexingOperationListenerTests.java | 2 +- .../index/translog/TranslogTests.java | 2 +- 6 files changed, 40 insertions(+), 29 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index e6f9516925878..d82bc1caab288 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -139,7 +139,7 @@ private static BulkItemResultHolder executeIndexRequest(final IndexRequest index return new BulkItemResultHolder(null, indexResult, bulkItemRequest); } else { IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), - indexResult.getSeqNo(), primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); + indexResult.getSeqNo(), indexResult.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); return new BulkItemResultHolder(response, indexResult, bulkItemRequest); } } @@ -152,7 +152,7 @@ private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest del return new BulkItemResultHolder(null, deleteResult, bulkItemRequest); } else { DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(), - deleteResult.getSeqNo(), primary.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); + deleteResult.getSeqNo(), deleteResult.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); return new BulkItemResultHolder(response, deleteResult, bulkItemRequest); } } @@ -317,7 +317,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest 
updateReq assert result instanceof Engine.IndexResult : result.getClass(); IndexRequest updateIndexRequest = translate.action(); final IndexResponse indexResponse = - new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), primary.getPrimaryTerm(), + new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated()); BytesReference indexSourceAsBytes = updateIndexRequest.source(); updateResponse = new UpdateResponse( @@ -343,7 +343,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq assert result instanceof Engine.DeleteResult : result.getClass(); DeleteRequest updateDeleteRequest = translate.action(); DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), - updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), primary.getPrimaryTerm(), + updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound()); updateResponse = new UpdateResponse( deleteResponse.getShardInfo(), diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 31ba05817b756..342fd0d3dc5d8 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -307,20 +307,22 @@ public abstract static class Result { private final Operation.TYPE operationType; private final long version; private final long seqNo; + private final long primaryTerm; private final Exception failure; private final SetOnce freeze = new SetOnce<>(); private Translog.Location translogLocation; private long took; - protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) { + protected 
Result(Operation.TYPE operationType, Exception failure, long version, long seqNo, long primaryTerm) { this.operationType = operationType; this.failure = failure; this.version = version; this.seqNo = seqNo; + this.primaryTerm = primaryTerm; } - protected Result(Operation.TYPE operationType, long version, long seqNo) { - this(operationType, null, version, seqNo); + protected Result(Operation.TYPE operationType, long version, long seqNo, long primaryTerm) { + this(operationType, null, version, seqNo, primaryTerm); } /** whether the operation had failure */ @@ -342,6 +344,15 @@ public long getSeqNo() { return seqNo; } + /** + * Get the primary term. + * + * @return the primary term + */ + public long getPrimaryTerm() { + return primaryTerm; + } + /** get the translog location after executing the operation */ public Translog.Location getTranslogLocation() { return translogLocation; @@ -389,7 +400,7 @@ public static class IndexResult extends Result { private final boolean created; public IndexResult(long version, long seqNo, long primaryTerm, boolean created) { - super(Operation.TYPE.INDEX, version, seqNo); + super(Operation.TYPE.INDEX, version, seqNo, primaryTerm); this.created = created; } @@ -397,12 +408,12 @@ public IndexResult(long version, long seqNo, long primaryTerm, boolean created) * use in case of index operation failed before getting to internal engine * (e.g while preparing operation or updating mappings) * */ - public IndexResult(Exception failure, long version) { - this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO); + public IndexResult(Exception failure, long version, long primaryTerm) { + this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm); } - public IndexResult(Exception failure, long version, long seqNo) { - super(Operation.TYPE.INDEX, failure, version, seqNo); + public IndexResult(Exception failure, long version, long seqNo, long primaryTerm) { + super(Operation.TYPE.INDEX, failure, version, seqNo, 
primaryTerm); this.created = false; } @@ -416,13 +427,13 @@ public static class DeleteResult extends Result { private final boolean found; - public DeleteResult(long version, long seqNo, boolean found) { - super(Operation.TYPE.DELETE, version, seqNo); + public DeleteResult(long version, long seqNo, long primaryTerm, boolean found) { + super(Operation.TYPE.DELETE, version, seqNo, primaryTerm); this.found = found; } - public DeleteResult(Exception failure, long version, long seqNo, boolean found) { - super(Operation.TYPE.DELETE, failure, version, seqNo); + public DeleteResult(Exception failure, long version, long seqNo, long primaryTerm, boolean found) { + super(Operation.TYPE.DELETE, failure, version, seqNo, primaryTerm); this.found = found; } @@ -434,12 +445,12 @@ public boolean isFound() { static class NoOpResult extends Result { - NoOpResult(long seqNo) { - super(Operation.TYPE.NO_OP, 0, seqNo); + NoOpResult(long seqNo, long primaryTerm) { + super(Operation.TYPE.NO_OP, 0, seqNo, primaryTerm); } - NoOpResult(long seqNo, Exception failure) { - super(Operation.TYPE.NO_OP, failure, 0, seqNo); + NoOpResult(long seqNo, long primaryTerm, Exception failure) { + super(Operation.TYPE.NO_OP, failure, 0, seqNo, primaryTerm); } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 107430b0a7405..77f6e3dd98300 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -896,7 +896,7 @@ public DeleteResult delete(Delete delete) throws IOException { deleteResult = deleteInLucene(delete, plan); } else { deleteResult = new DeleteResult( - plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); + plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); } if (!deleteResult.hasFailure() && delete.origin() != 
Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { @@ -990,12 +990,12 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( - plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); + plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { // there is no tragic event and such it must be a document level failure return new DeleteResult( - ex, plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); + ex, plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); } else { throw ex; } @@ -1028,7 +1028,7 @@ private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, static DeletionStrategy skipDueToVersionConflict( VersionConflictEngineException e, long currentVersion, long primaryTerm, boolean currentlyDeleted) { final DeleteResult deleteResult = - new DeleteResult(e, currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, currentlyDeleted == false); + new DeleteResult(e, currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, currentlyDeleted == false); return new DeletionStrategy( false, currentlyDeleted, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, deleteResult); } @@ -1057,7 +1057,7 @@ public NoOpResult noOp(final NoOp noOp) { try (ReleasableLock ignored = readLock.acquire()) { noOpResult = innerNoOp(noOp); } catch (final Exception e) { - noOpResult = new NoOpResult(noOp.seqNo(), e); + noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm(), e); } return noOpResult; } @@ -1066,7 +1066,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { assert noOp.seqNo() > SequenceNumbersService.NO_OPS_PERFORMED; final 
long seqNo = noOp.seqNo(); try { - final NoOpResult noOpResult = new NoOpResult(noOp.seqNo()); + final NoOpResult noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm()); final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); noOpResult.setTook(System.nanoTime() - noOp.startTime()); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 2996362735f2d..75d3e1a52ada5 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -569,7 +569,7 @@ protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) request.type(), request.id(), indexResult.getSeqNo(), - primary.getPrimaryTerm(), + indexResult.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index 0be902cae1b3d..b8a6c9c12a64f 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -137,7 +137,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { ParsedDocument doc = InternalEngineTests.createParsedDoc("1", "test", null); Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid())); Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc); - compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true)); + 
compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 2b8f7515ae7a6..f84d41528f393 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2085,7 +2085,7 @@ public void testTranslogOpSerialization() throws Exception { Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm, 2, VersionType.INTERNAL, Origin.PRIMARY, 0); - Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true); + Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, primaryTerm, true); Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult); out = new BytesStreamOutput(); From 16f02f8e71f410c2c1747ca44d6983cb38224dbd Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 12:22:00 -0400 Subject: [PATCH 09/17] Remove primary term from result --- .../action/bulk/TransportShardBulkAction.java | 23 ++++------- .../elasticsearch/index/engine/Engine.java | 41 +++++++------------ .../index/engine/InternalEngine.java | 25 ++++++----- .../bulk/TransportShardBulkActionTests.java | 19 ++------- .../ESIndexLevelReplicationTestCase.java | 2 +- .../shard/IndexingOperationListenerTests.java | 4 +- .../index/translog/TranslogTests.java | 4 +- .../recovery/RecoverySourceHandlerTests.java | 4 +- 8 files changed, 47 insertions(+), 75 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 
d82bc1caab288..63a3b2af20acd 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -43,7 +43,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -65,13 +64,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.index.translog.Translog.Location; -import org.elasticsearch.action.bulk.BulkItemResultHolder; -import org.elasticsearch.action.bulk.BulkItemResponse; import java.io.IOException; import java.util.Map; -import java.util.Objects; import java.util.function.LongSupplier; /** Performs shard-level bulk (index, delete or update) operations */ @@ -139,7 +134,7 @@ private static BulkItemResultHolder executeIndexRequest(final IndexRequest index return new BulkItemResultHolder(null, indexResult, bulkItemRequest); } else { IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), - indexResult.getSeqNo(), indexResult.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); + indexResult.getSeqNo(), primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); return new BulkItemResultHolder(response, indexResult, bulkItemRequest); } } @@ -152,7 +147,7 @@ private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest del return new BulkItemResultHolder(null, deleteResult, bulkItemRequest); } else { DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), 
deleteRequest.id(), - deleteResult.getSeqNo(), deleteResult.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); + deleteResult.getSeqNo(), primary.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); return new BulkItemResultHolder(response, deleteResult, bulkItemRequest); } } @@ -284,7 +279,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq } catch (Exception failure) { // we may fail translating a update to index or delete operation // we use index result to communicate failure while translating update request - result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); + result = new Engine.IndexResult(failure, updateRequest.version()); break; // out of retry loop } // execute translated update request @@ -317,7 +312,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq assert result instanceof Engine.IndexResult : result.getClass(); IndexRequest updateIndexRequest = translate.action(); final IndexResponse indexResponse = - new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), + new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated()); BytesReference indexSourceAsBytes = updateIndexRequest.source(); updateResponse = new UpdateResponse( @@ -343,7 +338,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq assert result instanceof Engine.DeleteResult : result.getClass(); DeleteRequest updateDeleteRequest = translate.action(); DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), - updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), result.getPrimaryTerm(), + updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), 
primary.getPrimaryTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound()); updateResponse = new UpdateResponse( deleteResponse.getShardInfo(), @@ -452,8 +447,8 @@ public static Engine.IndexResult executeIndexRequestOnReplica( try { operation = prepareIndexOperationOnReplica(primaryResponse, request, replica); } catch (MapperParsingException e) { - return new Engine.IndexResult(e, primaryResponse.getVersion(), - primaryResponse.getSeqNo()); + return new Engine.IndexResult(e, primaryResponse.getVersion() + ); } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); @@ -510,7 +505,7 @@ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest reque mappingUpdater.updateMappings(mappingUpdate, primary.shardId(), request.type()); } } catch (MapperParsingException | IllegalArgumentException failure) { - return new Engine.IndexResult(failure, request.version(), primary.getPrimaryTerm()); + return new Engine.IndexResult(failure, request.version()); } // Verify that there are no more mappings that need to be applied. If there are failures, a @@ -523,7 +518,7 @@ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest reque } catch (MapperParsingException | IllegalStateException e) { // there was an error in parsing the document that was not because // of pending mapping updates, so return a failure for the result - return new Engine.IndexResult(e, request.version(), primary.getPrimaryTerm()); + return new Engine.IndexResult(e, request.version()); } } else { // There was no mapping update, the operation is the same as the pre-update version. 
diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 342fd0d3dc5d8..b860a1f130755 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -307,22 +307,20 @@ public abstract static class Result { private final Operation.TYPE operationType; private final long version; private final long seqNo; - private final long primaryTerm; private final Exception failure; private final SetOnce freeze = new SetOnce<>(); private Translog.Location translogLocation; private long took; - protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo, long primaryTerm) { + protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) { this.operationType = operationType; this.failure = failure; this.version = version; this.seqNo = seqNo; - this.primaryTerm = primaryTerm; } - protected Result(Operation.TYPE operationType, long version, long seqNo, long primaryTerm) { - this(operationType, null, version, seqNo, primaryTerm); + protected Result(Operation.TYPE operationType, long version, long seqNo) { + this(operationType, null, version, seqNo); } /** whether the operation had failure */ @@ -344,15 +342,6 @@ public long getSeqNo() { return seqNo; } - /** - * Get the primary term. 
- * - * @return the primary term - */ - public long getPrimaryTerm() { - return primaryTerm; - } - /** get the translog location after executing the operation */ public Translog.Location getTranslogLocation() { return translogLocation; @@ -399,8 +388,8 @@ public static class IndexResult extends Result { private final boolean created; - public IndexResult(long version, long seqNo, long primaryTerm, boolean created) { - super(Operation.TYPE.INDEX, version, seqNo, primaryTerm); + public IndexResult(long version, long seqNo, boolean created) { + super(Operation.TYPE.INDEX, version, seqNo); this.created = created; } @@ -408,12 +397,12 @@ public IndexResult(long version, long seqNo, long primaryTerm, boolean created) * use in case of index operation failed before getting to internal engine * (e.g while preparing operation or updating mappings) * */ - public IndexResult(Exception failure, long version, long primaryTerm) { - this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm); + public IndexResult(Exception failure, long version) { + this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO); } - public IndexResult(Exception failure, long version, long seqNo, long primaryTerm) { - super(Operation.TYPE.INDEX, failure, version, seqNo, primaryTerm); + public IndexResult(Exception failure, long version, long seqNo) { + super(Operation.TYPE.INDEX, failure, version, seqNo); this.created = false; } @@ -427,13 +416,13 @@ public static class DeleteResult extends Result { private final boolean found; - public DeleteResult(long version, long seqNo, long primaryTerm, boolean found) { - super(Operation.TYPE.DELETE, version, seqNo, primaryTerm); + public DeleteResult(long version, long seqNo, boolean found) { + super(Operation.TYPE.DELETE, version, seqNo); this.found = found; } - public DeleteResult(Exception failure, long version, long seqNo, long primaryTerm, boolean found) { - super(Operation.TYPE.DELETE, failure, version, seqNo, primaryTerm); + public 
DeleteResult(Exception failure, long version, long seqNo, boolean found) { + super(Operation.TYPE.DELETE, failure, version, seqNo); this.found = found; } @@ -446,11 +435,11 @@ public boolean isFound() { static class NoOpResult extends Result { NoOpResult(long seqNo, long primaryTerm) { - super(Operation.TYPE.NO_OP, 0, seqNo, primaryTerm); + super(Operation.TYPE.NO_OP, 0, seqNo); } NoOpResult(long seqNo, long primaryTerm, Exception failure) { - super(Operation.TYPE.NO_OP, failure, 0, seqNo, primaryTerm); + super(Operation.TYPE.NO_OP, failure, 0, seqNo); } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 77f6e3dd98300..05f4352d5f0a3 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -612,7 +612,7 @@ public IndexResult index(Index index) throws IOException { indexResult = indexIntoLucene(index, plan); } else { indexResult = new IndexResult( - plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); + plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } if (indexResult.hasFailure() == false && index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { @@ -732,7 +732,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } versionMap.putUnderLock(index.uid().bytes(), new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); - return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), plan.currentNotFoundOrDeleted); + return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { /* There is no tragic event recorded so this must be a document failure. 
@@ -748,7 +748,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) * we return a `MATCH_ANY` version to indicate no document was index. The value is * not used anyway */ - return new IndexResult(ex, Versions.MATCH_ANY, index.seqNo()); + return new IndexResult(ex, Versions.MATCH_ANY); } else { throw ex; } @@ -823,7 +823,7 @@ static IndexingStrategy optimizedAppendOnly(long seqNoForIndexing) { static IndexingStrategy skipDueToVersionConflict( VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long primaryTerm) { - final IndexResult result = new IndexResult(e, currentVersion, primaryTerm); + final IndexResult result = new IndexResult(e, currentVersion); return new IndexingStrategy( currentNotFoundOrDeleted, false, false, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); } @@ -896,7 +896,7 @@ public DeleteResult delete(Delete delete) throws IOException { deleteResult = deleteInLucene(delete, plan); } else { deleteResult = new DeleteResult( - plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); + plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } if (!deleteResult.hasFailure() && delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { @@ -969,7 +969,7 @@ private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException final DeletionStrategy plan; if (delete.versionType().isVersionConflictForWrites(currentVersion, delete.version(), currentlyDeleted)) { final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete, currentVersion, currentlyDeleted); - plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, delete.primaryTerm(), currentlyDeleted); + plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, currentlyDeleted); } else { plan = DeletionStrategy.processNormally(currentlyDeleted, seqNoService().generateSeqNo(), @@ -990,12 +990,12 
@@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( - plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); + plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { // there is no tragic event and such it must be a document level failure return new DeleteResult( - ex, plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), plan.currentlyDeleted == false); + ex, plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } else { throw ex; } @@ -1026,11 +1026,10 @@ private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, } static DeletionStrategy skipDueToVersionConflict( - VersionConflictEngineException e, long currentVersion, long primaryTerm, boolean currentlyDeleted) { - final DeleteResult deleteResult = - new DeleteResult(e, currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, currentlyDeleted == false); - return new DeletionStrategy( - false, currentlyDeleted, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, deleteResult); + VersionConflictEngineException e, long currentVersion, boolean currentlyDeleted) { + final long unassignedSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + final DeleteResult deleteResult = new DeleteResult(e, currentVersion, unassignedSeqNo, currentlyDeleted == false); + return new DeletionStrategy(false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult); } static DeletionStrategy processNormally(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java 
b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index a4bf0d77a1c26..f6e7d29bd5815 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -34,14 +33,9 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -52,13 +46,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.action.bulk.TransportShardBulkAction; -import org.elasticsearch.action.bulk.MappingUpdatePerformer; -import org.elasticsearch.action.bulk.BulkItemResultHolder; import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static 
org.hamcrest.CoreMatchers.equalTo; @@ -390,7 +379,7 @@ public void testUpdateReplicaRequestWithFailure() throws Exception { BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); Exception err = new ElasticsearchException("I'm dead <(x.x)>"); - Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); + Engine.IndexResult indexResult = new Engine.IndexResult(err, 0); BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest); @@ -428,7 +417,7 @@ public void testUpdateReplicaRequestWithConflictFailure() throws Exception { Exception err = new VersionConflictEngineException(shardId, "type", "id", "I'm conflicted <(;_;)>"); - Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); + Engine.IndexResult indexResult = new Engine.IndexResult(err, 0); BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest); @@ -584,7 +573,7 @@ public void verifyMappings(Engine.Index operation, public class IndexResultWithLocation extends Engine.IndexResult { private final Translog.Location location; public IndexResultWithLocation(long version, long seqNo, long primaryTerm, boolean created, Translog.Location newLocation) { - super(version, seqNo, primaryTerm, created); + super(version, seqNo, created); this.location = newLocation; } @@ -620,7 +609,7 @@ private static class FakeResult extends Engine.IndexResult { private final Translog.Location location; protected FakeResult(long version, long seqNo, long primaryTerm, boolean created, Translog.Location location) { - super(version, seqNo, primaryTerm, created); + super(version, seqNo, created); this.location = location; } diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 75d3e1a52ada5..2996362735f2d 100644 --- 
a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -569,7 +569,7 @@ protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) request.type(), request.id(), indexResult.getSeqNo(), - indexResult.getPrimaryTerm(), + primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index b8a6c9c12a64f..88d8a075e1b3e 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -137,7 +137,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { ParsedDocument doc = InternalEngineTests.createParsedDoc("1", "test", null); Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid())); Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc); - compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true)); + compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); assertEquals(0, postIndexException.get()); @@ -161,7 +161,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(2, postDelete.get()); assertEquals(2, postDeleteException.get()); - compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, false)); + compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, 
SequenceNumbersService.UNASSIGNED_SEQ_NO, false)); assertEquals(0, preIndex.get()); assertEquals(2, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index f84d41528f393..82af8d0ec1b28 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2074,7 +2074,7 @@ public void testTranslogOpSerialization() throws Exception { Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm, 1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false); - Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, primaryTerm, true); + Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true); Translog.Index index = new Translog.Index(eIndex, eIndexResult); BytesStreamOutput out = new BytesStreamOutput(); @@ -2085,7 +2085,7 @@ public void testTranslogOpSerialization() throws Exception { Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm, 2, VersionType.INTERNAL, Origin.PRIMARY, 0); - Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, primaryTerm, true); + Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true); Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult); out = new BytesStreamOutput(); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index d96350eec388b..40a92b11e7372 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -170,12 +170,12 @@ 
public void testSendSnapshotSendsOps() throws IOException { final int initialNumberOfDocs = randomIntBetween(16, 64); for (int i = 0; i < initialNumberOfDocs; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true))); } final int numberOfDocsWithValidSequenceNumbers = randomIntBetween(16, 64); for (int i = initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, 1, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true))); } operations.add(null); int totalOperations = handler.sendSnapshot(startingSeqNo, new Translog.Snapshot() { From 34429e61c385e9f002e83909249824263594d16c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 12:25:08 -0400 Subject: [PATCH 10/17] Revert "Revert "Merge branch 'master' into doc-write-response-primary-term"" This reverts commit 6c17c2ce07330262a573435f227865fa7524d0ea. 
--- TESTING.asciidoc | 3 +- Vagrantfile | 4 - .../gradle/vagrant/VagrantTestPlugin.groovy | 1 - .../segments/IndicesSegmentResponse.java | 27 ++ .../admin/indices/shrink/ShrinkRequest.java | 3 + .../action/bulk/BulkItemRequest.java | 9 +- .../action/bulk/BulkItemResponse.java | 46 +++- .../action/bulk/TransportShardBulkAction.java | 184 +++++++++---- .../replication/TransportWriteAction.java | 4 +- .../metadata/MetaDataCreateIndexService.java | 18 +- .../common/settings/IndexScopedSettings.java | 6 +- .../discovery/zen/ElectMasterService.java | 3 +- .../org/elasticsearch/index/IndexService.java | 29 +- .../elasticsearch/index/IndexSettings.java | 17 +- .../elasticsearch/index/IndexSortConfig.java | 247 ++++++++++++++++++ .../elasticsearch/index/engine/Engine.java | 31 ++- .../index/engine/EngineConfig.java | 13 +- .../index/engine/InternalEngine.java | 33 ++- .../elasticsearch/index/engine/Segment.java | 121 ++++++++- .../index/mapper/MapperService.java | 8 + .../elasticsearch/index/shard/IndexShard.java | 53 ++-- .../index/shard/StoreRecovery.java | 27 +- .../shard/TranslogRecoveryPerformer.java | 4 +- .../bucketmetrics/BucketMetricValue.java | 27 ++ .../InternalBucketMetricValue.java | 3 +- .../admin/indices/create/ShrinkIndexIT.java | 82 ++++++ .../bulk/TransportShardBulkActionTests.java | 54 +++- .../org/elasticsearch/index/IndexSortIT.java | 131 ++++++++++ .../index/IndexSortSettingsTests.java | 160 ++++++++++++ .../index/engine/InternalEngineTests.java | 78 +++++- .../index/engine/SegmentTests.java | 114 ++++++++ .../index/mapper/MapperServiceTests.java | 22 ++ .../ESIndexLevelReplicationTestCase.java | 114 ++++---- .../IndexLevelReplicationTests.java | 145 +++++++++- .../RecoveryDuringReplicationTests.java | 6 +- .../index/shard/IndexShardIT.java | 2 +- .../index/shard/RefreshListenersTests.java | 2 +- .../index/shard/StoreRecoveryTests.java | 34 ++- .../index/translog/TranslogTests.java | 14 +- .../indices/cluster/ClusterStateChanges.java | 1 + 
.../InternalPercentilesTestCase.java | 61 +++++ .../hdr/InternalHDRPercentilesTests.java | 60 +++++ .../InternalTDigestPercentilesTests.java | 30 +-- docs/plugins/discovery-azure-classic.asciidoc | 2 +- .../index-modules/index-sorting.asciidoc | 107 ++++++++ docs/reference/ingest/ingest-node.asciidoc | 61 ++++- .../mapping/fields/field-names-field.asciidoc | 4 +- docs/reference/setup/install/docker.asciidoc | 5 +- .../painless/CompilerSettings.java | 3 +- .../painless/ArrayLikeObjectTestCase.java | 4 +- .../painless/BasicExpressionTests.java | 4 +- .../painless/ImplementInterfacesTests.java | 22 +- .../elasticsearch/painless/LambdaTests.java | 4 +- .../elasticsearch/painless/RegexTests.java | 7 +- .../painless/ScriptTestCase.java | 38 +++ .../elasticsearch/painless/StringTests.java | 4 +- .../painless/WhenThingsGoWrongTests.java | 121 ++++++--- .../elasticsearch/backwards/IndexingIT.java | 42 +-- .../test/indices.sort/10_basic.yaml | 75 ++++++ .../index/shard/IndexShardTestCase.java | 2 +- .../elasticsearch/test/ESIntegTestCase.java | 22 ++ 61 files changed, 2211 insertions(+), 347 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/index/IndexSortConfig.java create mode 100644 core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java create mode 100644 core/src/test/java/org/elasticsearch/index/IndexSortIT.java create mode 100644 core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java create mode 100644 core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java create mode 100644 core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java create mode 100644 core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java create mode 100644 docs/reference/index-modules/index-sorting.asciidoc create mode 100644 
rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 43b53fd360f39..216100c07da39 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -345,7 +345,6 @@ VM running trusty by running These are the linux flavors the Vagrantfile currently supports: -* ubuntu-1204 aka precise * ubuntu-1404 aka trusty * ubuntu-1604 aka xenial * debian-8 aka jessie, the current debian stable distribution @@ -431,7 +430,7 @@ gradle vagrantFedora24#up ------------------------------------------------- Or any of vagrantCentos6#up, vagrantDebian8#up, vagrantFedora24#up, vagrantOel6#up, -vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1204#up, +vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1404#up, vagrantUbuntu1604#up. Once up, you can then connect to the VM using SSH from the elasticsearch directory: diff --git a/Vagrantfile b/Vagrantfile index 044394424047c..f008b339c3fa4 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -22,10 +22,6 @@ # under the License. 
Vagrant.configure(2) do |config| - config.vm.define "ubuntu-1204" do |config| - config.vm.box = "elastic/ubuntu-12.04-x86_64" - ubuntu_common config - end config.vm.define "ubuntu-1404" do |config| config.vm.box = "elastic/ubuntu-14.04-x86_64" ubuntu_common config diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 336ee207abfb9..2fb047e93051d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -22,7 +22,6 @@ class VagrantTestPlugin implements Plugin { 'oel-7', 'opensuse-13', 'sles-12', - 'ubuntu-1204', 'ubuntu-1404', 'ubuntu-1604' ] diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index ed9463d1544e1..43b1033044c8c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -19,6 +19,10 @@ package org.elasticsearch.action.admin.indices.segments; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.util.Accountable; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; @@ -37,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.Locale; public class IndicesSegmentResponse extends BroadcastResponse implements ToXContent { @@ -140,6 +145,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) 
throws if (segment.getMergeId() != null) { builder.field(Fields.MERGE_ID, segment.getMergeId()); } + if (segment.getSegmentSort() != null) { + toXContent(builder, segment.getSegmentSort()); + } if (segment.ramTree != null) { builder.startArray(Fields.RAM_TREE); for (Accountable child : segment.ramTree.getChildResources()) { @@ -164,6 +172,25 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + static void toXContent(XContentBuilder builder, Sort sort) throws IOException { + builder.startArray("sort"); + for (SortField field : sort.getSort()) { + builder.startObject(); + builder.field("field", field.getField()); + if (field instanceof SortedNumericSortField) { + builder.field("mode", ((SortedNumericSortField) field).getSelector() + .toString().toLowerCase(Locale.ROOT)); + } else if (field instanceof SortedSetSortField) { + builder.field("mode", ((SortedSetSortField) field).getSelector() + .toString().toLowerCase(Locale.ROOT)); + } + builder.field("missing", field.getMissingValue()); + builder.field("reverse", field.getReverse()); + builder.endObject(); + } + builder.endArray(); + } + static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { builder.startObject(); builder.field(Fields.DESCRIPTION, tree.toString()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java index faa0a63c54dcf..6ea58200a4500 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java @@ -66,6 +66,9 @@ public ActionRequestValidationException validate() { if (shrinkIndexRequest == null) { validationException = addValidationError("shrink index request is missing", validationException); } + if (shrinkIndexRequest.settings().getByPrefix("index.sort.").isEmpty() == 
false) { + validationException = addValidationError("can't override index sort when shrinking index", validationException); + } return validationException; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 3023ecb1856a4..50da1476f49f3 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -38,7 +38,8 @@ public class BulkItemRequest implements Streamable { } - protected BulkItemRequest(int id, DocWriteRequest request) { + // NOTE: public for testing only + public BulkItemRequest(int id, DocWriteRequest request) { this.id = id; this.request = request; } @@ -56,13 +57,11 @@ public String index() { return request.indices()[0]; } - // NOTE: protected for testing only - protected BulkItemResponse getPrimaryResponse() { + BulkItemResponse getPrimaryResponse() { return primaryResponse; } - // NOTE: protected for testing only - protected void setPrimaryResponse(BulkItemResponse primaryResponse) { + void setPrimaryResponse(BulkItemResponse primaryResponse) { this.primaryResponse = primaryResponse; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 2e2a7f1540108..68cede5d25178 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -37,6 +37,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -171,17 +173,34 @@ public static class 
Failure implements Writeable, ToXContent { private final String id; private final Exception cause; private final RestStatus status; + private final long seqNo; - Failure(String index, String type, String id, Exception cause, RestStatus status) { + /** + * For write failures before operation was assigned a sequence number. + * + * use @{link {@link #Failure(String, String, String, Exception, long)}} + * to record operation sequence no with failure + */ + public Failure(String index, String type, String id, Exception cause) { + this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbersService.UNASSIGNED_SEQ_NO); + } + + public Failure(String index, String type, String id, Exception cause, RestStatus status) { + this(index, type, id, cause, status, SequenceNumbersService.UNASSIGNED_SEQ_NO); + } + + /** For write failures after operation was assigned a sequence number. */ + public Failure(String index, String type, String id, Exception cause, long seqNo) { + this(index, type, id, cause, ExceptionsHelper.status(cause), seqNo); + } + + public Failure(String index, String type, String id, Exception cause, RestStatus status, long seqNo) { this.index = index; this.type = type; this.id = id; this.cause = cause; this.status = status; - } - - public Failure(String index, String type, String id, Exception cause) { - this(index, type, id, cause, ExceptionsHelper.status(cause)); + this.seqNo = seqNo; } /** @@ -193,6 +212,11 @@ public Failure(StreamInput in) throws IOException { id = in.readOptionalString(); cause = in.readException(); status = ExceptionsHelper.status(cause); + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + seqNo = in.readZLong(); + } else { + seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + } } @Override @@ -201,6 +225,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(getType()); out.writeOptionalString(getId()); out.writeException(getCause()); + if 
(out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + out.writeZLong(getSeqNo()); + } } @@ -246,6 +273,15 @@ public Exception getCause() { return cause; } + /** + * The operation sequence number generated by primary + * NOTE: {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} + * indicates sequence number was not generated by primary + */ + public long getSeqNo() { + return seqNo; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(INDEX_FIELD, index); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 63a3b2af20acd..332311a23d00b 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -108,12 +109,20 @@ protected boolean resolveIndex() { @Override public WritePrimaryResult shardOperationOnPrimary( BulkShardRequest request, IndexShard primary) throws Exception { + return performOnPrimary(request, primary, updateHelper, threadPool::absoluteTimeInMillis, new ConcreteMappingUpdatePerformer()); + } + + public static WritePrimaryResult performOnPrimary( + BulkShardRequest request, + IndexShard primary, + UpdateHelper updateHelper, + LongSupplier nowInMillisSupplier, + MappingUpdatePerformer mappingUpdater) throws Exception { final IndexMetaData metaData = primary.indexSettings().getIndexMetaData(); Translog.Location location = null; - final MappingUpdatePerformer mappingUpdater = new 
ConcreteMappingUpdatePerformer(); for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) { location = executeBulkItemRequest(metaData, primary, request, location, requestIndex, - updateHelper, threadPool::absoluteTimeInMillis, mappingUpdater); + updateHelper, nowInMillisSupplier, mappingUpdater); } BulkItemResponse[] responses = new BulkItemResponse[request.items().length]; BulkItemRequest[] items = request.items(); @@ -124,7 +133,6 @@ public WritePrimaryResult shardOperationOnP return new WritePrimaryResult<>(request, response, location, null, primary, logger); } - private static BulkItemResultHolder executeIndexRequest(final IndexRequest indexRequest, final BulkItemRequest bulkItemRequest, final IndexShard primary, @@ -203,7 +211,8 @@ static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResul // Make sure to use request.index() here, if you // use docWriteRequest.index() it will use the // concrete index instead of an alias if used! - new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)); + new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), + failure, operationResult.getSeqNo())); } else { assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response"; return null; @@ -216,7 +225,7 @@ static Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexSha BulkShardRequest request, Translog.Location location, int requestIndex, UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, - final MappingUpdatePerformer mappingUpdater) throws Exception { + final MappingUpdatePerformer mappingUpdater) throws Exception { final DocWriteRequest itemRequest = request.items()[requestIndex].request(); final DocWriteRequest.OpType opType = itemRequest.opType(); final BulkItemResultHolder responseHolder; @@ -367,58 +376,129 @@ private static BulkItemResultHolder 
executeUpdateRequest(UpdateRequest updateReq return new BulkItemResultHolder(updateResponse, result, replicaRequest); } - static boolean shouldExecuteReplicaItem(final BulkItemRequest request, final int index) { + /** Modes for executing item request on replica depending on corresponding primary execution result */ + public enum ReplicaItemExecutionMode { + + /** + * When primary execution succeeded + */ + NORMAL, + + /** + * When primary execution failed before sequence no was generated + * or primary execution was a noop (only possible when request is originating from pre-6.0 nodes) + */ + NOOP, + + /** + * When primary execution failed after sequence no was generated + */ + FAILURE + } + + static { + assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_5_0_0) == false: + "Remove logic handling NoOp result from primary response; see TODO in replicaItemExecutionMode" + + " as the current minimum compatible version [" + + Version.CURRENT.minimumCompatibilityVersion() + "] is after 5.0"; + } + + /** + * Determines whether a bulk item request should be executed on the replica. 
+ * @return {@link ReplicaItemExecutionMode#NORMAL} upon normal primary execution with no failures + * {@link ReplicaItemExecutionMode#FAILURE} upon primary execution failure after sequence no generation + * {@link ReplicaItemExecutionMode#NOOP} upon primary execution failure before sequence no generation or + * when primary execution resulted in noop (only possible for write requests from pre-6.0 nodes) + */ + static ReplicaItemExecutionMode replicaItemExecutionMode(final BulkItemRequest request, final int index) { final BulkItemResponse primaryResponse = request.getPrimaryResponse(); - assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request ["+ request.request() +"]"; - return primaryResponse.isFailed() == false && - primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP; + assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request [" + request.request() + "]"; + if (primaryResponse.isFailed()) { + return primaryResponse.getFailure().getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO + ? ReplicaItemExecutionMode.FAILURE // we have a seq no generated with the failure, replicate as no-op + : ReplicaItemExecutionMode.NOOP; // no seq no generated, ignore replication + } else { + // NOTE: write requests originating from pre-6.0 nodes can send a no-op operation to + // the replica; we ignore replication + // TODO: remove noOp result check from primary response, when pre-6.0 nodes are not supported + // we should return ReplicationItemExecutionMode.NORMAL instead + return primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP + ? 
ReplicaItemExecutionMode.NORMAL // execution successful on primary + : ReplicaItemExecutionMode.NOOP; // ignore replication + } } @Override public WriteReplicaResult shardOperationOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { + final Translog.Location location = performOnReplica(request, replica); + return new WriteReplicaResult<>(request, location, null, replica, logger); + } + + public static Translog.Location performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; - if (shouldExecuteReplicaItem(item, i)) { - DocWriteRequest docWriteRequest = item.request(); - DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse(); - final Engine.Result operationResult; - try { - switch (docWriteRequest.opType()) { - case CREATE: - case INDEX: - operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica); - break; - case DELETE: - operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica); - break; - default: - throw new IllegalStateException("Unexpected request operation type on replica: " - + docWriteRequest.opType().getLowercase()); - } - if (operationResult.hasFailure()) { - // check if any transient write operation failures should be bubbled up - Exception failure = operationResult.getFailure(); - assert failure instanceof VersionConflictEngineException - || failure instanceof MapperParsingException - : "expected any one of [version conflict, mapper parsing, engine closed, index shard closed]" + - " failures. 
got " + failure; - if (!TransportActions.isShardNotAvailableException(failure)) { - throw failure; + final Engine.Result operationResult; + DocWriteRequest docWriteRequest = item.request(); + try { + switch (replicaItemExecutionMode(item, i)) { + case NORMAL: + final DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse(); + switch (docWriteRequest.opType()) { + case CREATE: + case INDEX: + operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica); + break; + case DELETE: + operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica); + break; + default: + throw new IllegalStateException("Unexpected request operation type on replica: " + + docWriteRequest.opType().getLowercase()); } - } else { - location = locationToSync(location, operationResult.getTranslogLocation()); - } - } catch (Exception e) { - // if its not an ignore replica failure, we need to make sure to bubble up the failure - // so we will fail the shard - if (!TransportActions.isShardNotAvailableException(e)) { - throw e; - } + assert operationResult != null : "operation result must never be null when primary response has no failure"; + location = syncOperationResultOrThrow(operationResult, location); + break; + case NOOP: + break; + case FAILURE: + final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure(); + assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned"; + operationResult = executeFailureNoOpOnReplica(failure, replica); + assert operationResult != null : "operation result must never be null when primary response has no failure"; + location = syncOperationResultOrThrow(operationResult, location); + break; + default: + throw new IllegalStateException("illegal replica item execution mode for: " + item.request()); + } + } catch (Exception e) { + // if its not an ignore replica failure, we need to make sure to bubble up 
the failure + // so we will fail the shard + if (!TransportActions.isShardNotAvailableException(e)) { + throw e; } } } - return new WriteReplicaResult<>(request, location, null, replica, logger); + return location; + } + + /** Syncs operation result to the translog or throws a shard not available failure */ + private static Translog.Location syncOperationResultOrThrow(final Engine.Result operationResult, + final Translog.Location currentLocation) throws Exception { + final Translog.Location location; + if (operationResult.hasFailure()) { + // check if any transient write operation failures should be bubbled up + Exception failure = operationResult.getFailure(); + assert failure instanceof MapperParsingException : "expected mapper parsing failures. got " + failure; + if (!TransportActions.isShardNotAvailableException(failure)) { + throw failure; + } else { + location = currentLocation; + } + } else { + location = locationToSync(currentLocation, operationResult.getTranslogLocation()); + } + return location; } private static Translog.Location locationToSync(Translog.Location current, @@ -438,7 +518,7 @@ private static Translog.Location locationToSync(Translog.Location current, * Execute the given {@link IndexRequest} on a replica shard, throwing a * {@link RetryOnReplicaException} if the operation needs to be re-tried. 
*/ - public static Engine.IndexResult executeIndexRequestOnReplica( + private static Engine.IndexResult executeIndexRequestOnReplica( DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException { @@ -481,7 +561,7 @@ static Engine.Index prepareIndexOperationOnReplica( } /** Utility method to prepare an index operation on primary shards */ - static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { + private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { final SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(), request.getContentType()) @@ -491,8 +571,8 @@ static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexSh } /** Executes index operation on primary shard after updates mapping if dynamic mappings are found */ - public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, - MappingUpdatePerformer mappingUpdater) throws Exception { + static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, + MappingUpdatePerformer mappingUpdater) throws Exception { // Update the mappings if parsing the documents includes new dynamic updates final Engine.Index preUpdateOperation; final Mapping mappingUpdate; @@ -542,6 +622,12 @@ private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteRespons return replica.delete(delete); } + private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, IndexShard replica) throws IOException { + final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOp( + primaryFailure.getSeqNo(), primaryFailure.getMessage()); + return replica.markSeqNoAsNoOp(noOp); + } + class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer { public void updateMappings(final Mapping update, 
final ShardId shardId, diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index ae4ae78c03386..938e90b82b2fb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -94,8 +94,10 @@ protected abstract WriteReplicaResult shardOperationOnReplica( /** * Result of taking the action on the primary. + * + * NOTE: public for testing */ - protected static class WritePrimaryResult, + public static class WritePrimaryResult, Response extends ReplicationResponse & WriteResponse> extends PrimaryResult implements RespondingWriteResult { boolean finishedAsyncActions; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 2cb93373700f3..a3292e2cfd445 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -374,9 +374,18 @@ public ClusterState execute(ClusterState currentState) throws Exception { throw e; } + if (request.shrinkFrom() == null) { + // now that the mapping is merged we can validate the index sort. + // we cannot validate for index shrinking since the mapping is empty + // at this point. The validation will take place later in the process + // (when all shards are copied in a single place). 
+ indexService.getIndexSortSupplier().get(); + } + // the context is only used for validation so it's fine to pass fake values for the shard id and the current // timestamp final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L); + for (Alias alias : request.aliases()) { if (Strings.hasLength(alias.filter())) { aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext, xContentRegistry); @@ -581,10 +590,11 @@ static List validateShrinkIndex(ClusterState state, String sourceIndex, static void prepareShrinkIndexSettings(ClusterState currentState, Set mappingKeys, Settings.Builder indexSettingsBuilder, Index shrinkFromIndex, String shrinkIntoName) { final IndexMetaData sourceMetaData = currentState.metaData().index(shrinkFromIndex.getName()); + final List nodesToAllocateOn = validateShrinkIndex(currentState, shrinkFromIndex.getName(), mappingKeys, shrinkIntoName, indexSettingsBuilder.build()); - final Predicate analysisSimilarityPredicate = (s) -> s.startsWith("index.similarity.") - || s.startsWith("index.analysis."); + final Predicate sourceSettingsPredicate = (s) -> s.startsWith("index.similarity.") + || s.startsWith("index.analysis.") || s.startsWith("index.sort."); indexSettingsBuilder // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away // once we are allocated. 
@@ -592,11 +602,11 @@ static void prepareShrinkIndexSettings(ClusterState currentState, Set ma Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())) // we only try once and then give up with a shrink index .put("index.allocation.max_retries", 1) - // now copy all similarity / analysis settings - this overrides all settings from the user unless they + // now copy all similarity / analysis / sort settings - this overrides all settings from the user unless they // wanna add extra settings .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion()) .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion()) - .put(sourceMetaData.getSettings().filter(analysisSimilarityPredicate)) + .put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)) .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize()) .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName()) .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID()); diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index efbe7acf5e1b6..4094d69eddeb8 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -18,13 +18,13 @@ */ package org.elasticsearch.common.settings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.settings.Setting.Property; 
-import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingSlowLog; @@ -100,6 +100,10 @@ public final class IndexScopedSettings extends AbstractScopedSettings { MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, + IndexSortConfig.INDEX_SORT_FIELD_SETTING, + IndexSortConfig.INDEX_SORT_ORDER_SETTING, + IndexSortConfig.INDEX_SORT_MISSING_SETTING, + IndexSortConfig.INDEX_SORT_MODE_SETTING, IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING, IndexSettings.INDEX_WARMER_ENABLED_SETTING, IndexSettings.INDEX_REFRESH_INTERVAL_SETTING, diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 92b20c5199b89..024c50fb6e090 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -150,7 +150,8 @@ public DiscoveryNode tieBreakActiveMasters(Collection activeMaste } public boolean hasEnoughMasterNodes(Iterable nodes) { - return minimumMasterNodes < 1 || countMasterNodes(nodes) >= minimumMasterNodes; + final int count = countMasterNodes(nodes); + return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes); } public boolean hasTooManyMasterNodes(Iterable nodes) { diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index e528dde7179b9..9a24f8eb68df7 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -20,8 +20,8 @@ package org.elasticsearch.index; import 
org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.Sort; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.IOUtils; @@ -84,6 +84,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.LongSupplier; +import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -119,6 +120,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ScriptService scriptService; private final ClusterService clusterService; private final Client client; + private Supplier indexSortSupplier; public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, NamedXContentRegistry xContentRegistry, @@ -153,6 +155,16 @@ public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp"); })); this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); + if (indexSettings.getIndexSortConfig().hasIndexSort()) { + // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. + // The sort order is validated right after the merge of the mapping later in the process. 
+ this.indexSortSupplier = () -> indexSettings.getIndexSortConfig().buildIndexSort( + mapperService::fullName, + indexFieldData::getForField + ); + } else { + this.indexSortSupplier = () -> null; + } this.shardStoreDeleter = shardStoreDeleter; this.bigArrays = bigArrays; this.threadPool = threadPool; @@ -243,6 +255,10 @@ public SimilarityService similarityService() { return similarityService; } + public Supplier getIndexSortSupplier() { + return indexSortSupplier; + } + public synchronized void close(final String reason, boolean delete) throws IOException { if (closed.compareAndSet(false, true)) { deleted.compareAndSet(false, delete); @@ -350,10 +366,10 @@ public synchronized IndexShard createShard(ShardRouting routing) throws IOExcept }; store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); - indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, - indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, - () -> globalCheckpointSyncer.accept(shardId), - searchOperationListeners, indexingOperationListeners); + indexShard = new IndexShard(routing, this.indexSettings, path, store, indexSortSupplier, + indexCache, mapperService, similarityService, indexFieldData, engineFactory, + eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, + () -> globalCheckpointSyncer.accept(shardId), searchOperationListeners, indexingOperationListeners); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap(); @@ -401,7 +417,8 @@ private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store final boolean flushEngine = deleted.get() == false && closed.get(); 
indexShard.close(reason, flushEngine); } catch (Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); + logger.debug((org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); // ignore } } diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 011229256af65..8acdf7d1360cb 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -98,11 +98,11 @@ public final class IndexSettings { Setting.intSetting("index.max_rescore_window", MAX_RESULT_WINDOW_SETTING, 1, Property.Dynamic, Property.IndexScope); /** * Index setting describing the maximum number of filters clauses that can be used - * in an adjacency_matrix aggregation. The max number of buckets produced by + * in an adjacency_matrix aggregation. The max number of buckets produced by * N filters is (N*N)/2 so a limit of 100 filters is imposed by default. 
*/ public static final Setting MAX_ADJACENCY_MATRIX_FILTERS_SETTING = - Setting.intSetting("index.max_adjacency_matrix_filters", 100, 2, Property.Dynamic, Property.IndexScope); + Setting.intSetting("index.max_adjacency_matrix_filters", 100, 2, Property.Dynamic, Property.IndexScope); public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); public static final Setting INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), @@ -176,6 +176,7 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; private final MergePolicyConfig mergePolicyConfig; + private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); private volatile boolean warmerEnabled; @@ -278,6 +279,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); this.mergePolicyConfig = new MergePolicyConfig(logger, this); + this.indexSortConfig = new IndexSortConfig(this); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed); @@ -499,7 +501,7 @@ public int getMaxResultWindow() { private void setMaxResultWindow(int maxResultWindow) { this.maxResultWindow = maxResultWindow; } - + /** * Returns the max number of filters in adjacency_matrix aggregation search requests */ @@ -509,7 +511,7 @@ public int getMaxAdjacencyMatrixFilters() { private void setMaxAdjacencyMatrixFilters(int maxAdjacencyFilters) { 
this.maxAdjacencyMatrixFilters = maxAdjacencyFilters; - } + } /** * Returns the maximum rescore window for search requests. @@ -574,5 +576,12 @@ private void setMaxSlicesPerScroll(int value) { this.maxSlicesPerScroll = value; } + /** + * Returns the index sort config that should be used for this index. + */ + public IndexSortConfig getIndexSortConfig() { + return indexSortConfig; + } + public IndexScopedSettings getScopedSettings() { return scopedSettings;} } diff --git a/core/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/core/src/main/java/org/elasticsearch/index/IndexSortConfig.java new file mode 100644 index 0000000000000..1d3f5f0fc23ea --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -0,0 +1,247 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; +import org.elasticsearch.Version; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.MultiValueMode; +import org.elasticsearch.search.sort.SortOrder; + +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.function.Function; + +/** + * Holds all the information that is used to build the sort order of an index. + * + * The index sort settings are final and can be defined only at index creation. + * These settings are divided in four lists that are merged during the initialization of this class: + *
+ * <ul>
+ *     <li>`index.sort.field`: the field or a list of fields to use for the sort</li>
+ *     <li>`index.sort.order` the {@link SortOrder} to use for the field or a list of {@link SortOrder}
+ * for each field defined in `index.sort.field`.
+ *     </li>
+ *     <li>`index.sort.mode`: the {@link MultiValueMode} to use for the field or a list of orders
+ * for each field defined in `index.sort.field`.
+ *     </li>
+ *     <li>`index.sort.missing`: the missing value to use for the field or a list of missing values
+ * for each field defined in `index.sort.field`
+ *     </li>
+ * </ul>
+ * +**/ +public final class IndexSortConfig { + /** + * The list of field names + */ + public static final Setting> INDEX_SORT_FIELD_SETTING = + Setting.listSetting("index.sort.field", Collections.emptyList(), + Function.identity(), Setting.Property.IndexScope, Setting.Property.Final); + + /** + * The {@link SortOrder} for each specified sort field (ie. asc or desc). + */ + public static final Setting> INDEX_SORT_ORDER_SETTING = + Setting.listSetting("index.sort.order", Collections.emptyList(), + IndexSortConfig::parseOrderMode, Setting.Property.IndexScope, Setting.Property.Final); + + + /** + * The {@link MultiValueMode} for each specified sort field (ie. max or min). + */ + public static final Setting> INDEX_SORT_MODE_SETTING = + Setting.listSetting("index.sort.mode", Collections.emptyList(), + IndexSortConfig::parseMultiValueMode, Setting.Property.IndexScope, Setting.Property.Final); + + /** + * The missing value for each specified sort field (ie. _first or _last) + */ + public static final Setting> INDEX_SORT_MISSING_SETTING = + Setting.listSetting("index.sort.missing", Collections.emptyList(), + IndexSortConfig::validateMissingValue, Setting.Property.IndexScope, Setting.Property.Final); + + private static String validateMissingValue(String missing) { + if ("_last".equals(missing) == false && "_first".equals(missing) == false) { + throw new IllegalArgumentException("Illegal missing value:[" + missing + "], " + + "must be one of [_last, _first]"); + } + return missing; + } + + private static SortOrder parseOrderMode(String value) { + try { + return SortOrder.fromString(value); + } catch (Exception e) { + throw new IllegalArgumentException("Illegal sort order:" + value); + } + } + + private static MultiValueMode parseMultiValueMode(String value) { + MultiValueMode mode = MultiValueMode.fromString(value); + if (mode != MultiValueMode.MAX && mode != MultiValueMode.MIN) { + throw new IllegalArgumentException("Illegal index sort mode:[" + mode + "], " + + "must be 
one of [" + MultiValueMode.MAX + ", " + MultiValueMode.MIN + "]"); + } + return mode; + } + + // visible for tests + final FieldSortSpec[] sortSpecs; + + public IndexSortConfig(IndexSettings indexSettings) { + final Settings settings = indexSettings.getSettings(); + List fields = INDEX_SORT_FIELD_SETTING.get(settings); + this.sortSpecs = fields.stream() + .map((name) -> new FieldSortSpec(name)) + .toArray(FieldSortSpec[]::new); + + if (sortSpecs.length > 0 && indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED)) { + /** + * This index might be assigned to a node where the index sorting feature is not available + * (ie. versions prior to {@link Version.V_6_0_0_alpha1_UNRELEASED}) so we must fail here rather than later. + */ + throw new IllegalArgumentException("unsupported index.version.created:" + indexSettings.getIndexVersionCreated() + + ", can't set index.sort on versions prior to " + Version.V_6_0_0_alpha1_UNRELEASED); + } + + if (INDEX_SORT_ORDER_SETTING.exists(settings)) { + List orders = INDEX_SORT_ORDER_SETTING.get(settings); + if (orders.size() != sortSpecs.length) { + throw new IllegalArgumentException("index.sort.field:" + fields + + " index.sort.order:" + orders.toString() + ", size mismatch"); + } + for (int i = 0; i < sortSpecs.length; i++) { + sortSpecs[i].order = orders.get(i); + } + } + + if (INDEX_SORT_MODE_SETTING.exists(settings)) { + List modes = INDEX_SORT_MODE_SETTING.get(settings); + if (modes.size() != sortSpecs.length) { + throw new IllegalArgumentException("index.sort.field:" + fields + + " index.sort.mode:" + modes + ", size mismatch"); + } + for (int i = 0; i < sortSpecs.length; i++) { + sortSpecs[i].mode = modes.get(i); + } + } + + if (INDEX_SORT_MISSING_SETTING.exists(settings)) { + List missingValues = INDEX_SORT_MISSING_SETTING.get(settings); + if (missingValues.size() != sortSpecs.length) { + throw new IllegalArgumentException("index.sort.field:" + fields + + " index.sort.missing:" + missingValues + ", 
size mismatch"); + } + for (int i = 0; i < sortSpecs.length; i++) { + sortSpecs[i].missingValue = missingValues.get(i); + } + } + } + + + /** + * Returns true if the index should be sorted + */ + public boolean hasIndexSort() { + return sortSpecs.length > 0; + } + + /** + * Builds the {@link Sort} order from the settings for this index + * or returns null if this index has no sort. + */ + public Sort buildIndexSort(Function fieldTypeLookup, + Function> fieldDataLookup) { + if (hasIndexSort() == false) { + return null; + } + + final SortField[] sortFields = new SortField[sortSpecs.length]; + for (int i = 0; i < sortSpecs.length; i++) { + FieldSortSpec sortSpec = sortSpecs[i]; + final MappedFieldType ft = fieldTypeLookup.apply(sortSpec.field); + if (ft == null) { + throw new IllegalArgumentException("unknown index sort field:[" + sortSpec.field + "]"); + } + boolean reverse = sortSpec.order == null ? false : (sortSpec.order == SortOrder.DESC); + MultiValueMode mode = sortSpec.mode; + if (mode == null) { + mode = reverse ? 
MultiValueMode.MAX : MultiValueMode.MIN; + } + IndexFieldData fieldData; + try { + fieldData = fieldDataLookup.apply(ft); + } catch (Exception e) { + throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); + } + if (fieldData == null) { + throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); + } + sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); + validateIndexSortField(sortFields[i]); + } + return new Sort(sortFields); + } + + private void validateIndexSortField(SortField sortField) { + SortField.Type type = getSortFieldType(sortField); + if (ALLOWED_INDEX_SORT_TYPES.contains(type) == false) { + throw new IllegalArgumentException("invalid index sort field:[" + sortField.getField() + "]"); + } + } + + static class FieldSortSpec { + final String field; + SortOrder order; + MultiValueMode mode; + String missingValue; + + FieldSortSpec(String field) { + this.field = field; + } + } + + /** We only allow index sorting on these types */ + private static final EnumSet ALLOWED_INDEX_SORT_TYPES = EnumSet.of( + SortField.Type.STRING, + SortField.Type.LONG, + SortField.Type.INT, + SortField.Type.DOUBLE, + SortField.Type.FLOAT + ); + + static SortField.Type getSortFieldType(SortField sortField) { + if (sortField instanceof SortedSetSortField) { + return SortField.Type.STRING; + } else if (sortField instanceof SortedNumericSortField) { + return ((SortedNumericSortField) sortField).getNumericType(); + } else { + return sortField.getType(); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index b860a1f130755..7c1bad4fcef3f 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -363,7 +363,6 @@ public Operation.TYPE getOperationType() { void 
setTranslogLocation(Translog.Location translogLocation) { if (freeze.get() == null) { - assert failure == null : "failure has to be null to set translog location"; this.translogLocation = translogLocation; } else { throw new IllegalStateException("result is already frozen"); @@ -432,7 +431,7 @@ public boolean isFound() { } - static class NoOpResult extends Result { + public static class NoOpResult extends Result { NoOpResult(long seqNo, long primaryTerm) { super(Operation.TYPE.NO_OP, 0, seqNo); @@ -706,6 +705,7 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole } final SegmentReader segmentReader = segmentReader(reader.reader()); segment.memoryInBytes = segmentReader.ramBytesUsed(); + segment.segmentSort = info.info.getIndexSort(); if (verbose) { segment.ramTree = Accountables.namedAccountable("root", segmentReader); } @@ -1154,24 +1154,31 @@ public String reason() { return reason; } - public NoOp( - final Term uid, - final long seqNo, - final long primaryTerm, - final long version, - final VersionType versionType, - final Origin origin, - final long startTime, - final String reason) { - super(uid, seqNo, primaryTerm, version, versionType, origin, startTime); + public NoOp(final long seqNo, final long primaryTerm, final Origin origin, final long startTime, final String reason) { + super(null, seqNo, primaryTerm, Versions.NOT_FOUND, null, origin, startTime); this.reason = reason; } + @Override + public Term uid() { + throw new UnsupportedOperationException(); + } + @Override public String type() { throw new UnsupportedOperationException(); } + @Override + public long version() { + throw new UnsupportedOperationException(); + } + + @Override + public VersionType versionType() { + throw new UnsupportedOperationException(); + } + @Override String id() { throw new UnsupportedOperationException(); diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java 
b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 60dddc4d40db1..7852d2c2db089 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.search.Sort; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; @@ -69,6 +70,8 @@ public final class EngineConfig { private final long maxUnsafeAutoIdTimestamp; @Nullable private final ReferenceManager.RefreshListener refreshListeners; + @Nullable + private final Sort indexSort; /** * Index setting to change the low level lucene codec used for writing new segments. @@ -113,7 +116,7 @@ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig, TimeValue flushMergesAfter, ReferenceManager.RefreshListener refreshListeners, - long maxUnsafeAutoIdTimestamp) { + long maxUnsafeAutoIdTimestamp, Sort indexSort) { if (openMode == null) { throw new IllegalArgumentException("openMode must not be null"); } @@ -143,6 +146,7 @@ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, assert maxUnsafeAutoIdTimestamp >= IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP : "maxUnsafeAutoIdTimestamp must be >= -1 but was " + maxUnsafeAutoIdTimestamp; this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp; + this.indexSort = indexSort; } /** @@ -335,4 +339,11 @@ public ReferenceManager.RefreshListener getRefreshListeners() { public long getMaxUnsafeAutoIdTimestamp() { return 
indexSettings.getValue(INDEX_OPTIMIZE_AUTO_GENERATED_IDS) ? maxUnsafeAutoIdTimestamp : Long.MAX_VALUE; } + + /** + * Return the sort order of this index, or null if the index has no sort. + */ + public Sort getIndexSort() { + return indexSort; + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 05f4352d5f0a3..2c59869868162 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -614,10 +614,16 @@ public IndexResult index(Index index) throws IOException { indexResult = new IndexResult( plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } - if (indexResult.hasFailure() == false && - index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { - Translog.Location location = - translog.add(new Translog.Index(index, indexResult)); + if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + final Translog.Location location; + if (indexResult.hasFailure() == false) { + location = translog.add(new Translog.Index(index, indexResult)); + } else if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + // if we have document failure, record it as a no-op in the translog with the generated seq_no + location = translog.add(new Translog.NoOp(indexResult.getSeqNo(), index.primaryTerm(), indexResult.getFailure().getMessage())); + } else { + location = null; + } indexResult.setTranslogLocation(location); } if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { @@ -748,7 +754,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) * we return a `MATCH_ANY` version to indicate no document was index. 
The value is * not used anyway */ - return new IndexResult(ex, Versions.MATCH_ANY); + return new IndexResult(ex, Versions.MATCH_ANY, plan.seqNoForIndexing); } else { throw ex; } @@ -898,10 +904,16 @@ public DeleteResult delete(Delete delete) throws IOException { deleteResult = new DeleteResult( plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); } - if (!deleteResult.hasFailure() && - delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { - Translog.Location location = - translog.add(new Translog.Delete(delete, deleteResult)); + if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + final Translog.Location location; + if (deleteResult.hasFailure() == false) { + location = translog.add(new Translog.Delete(delete, deleteResult)); + } else if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(), + delete.primaryTerm(), deleteResult.getFailure().getMessage())); + } else { + location = null; + } deleteResult.setTranslogLocation(location); } if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { @@ -1589,6 +1601,9 @@ private IndexWriterConfig getIndexWriterConfig(boolean create) { iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); iwc.setCodec(engineConfig.getCodec()); iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh + if (config().getIndexSort() != null) { + iwc.setIndexSort(config().getIndexSort()); + } return iwc; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Segment.java b/core/src/main/java/org/elasticsearch/index/engine/Segment.java index 7d3882fd9b654..565ed9f1d83f5 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -19,8 +19,15 @@ package org.elasticsearch.index.engine; +import org.apache.lucene.search.Sort; +import 
org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSelector; +import org.apache.lucene.search.SortedNumericSelector; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -46,6 +53,7 @@ public class Segment implements Streamable { public Boolean compound = null; public String mergeId; public long memoryInBytes; + public Sort segmentSort; public Accountable ramTree = null; Segment() { @@ -113,6 +121,13 @@ public long getMemoryInBytes() { return this.memoryInBytes; } + /** + * Return the sort order of this segment, or null if the segment has no sort. + */ + public Sort getSegmentSort() { + return segmentSort; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -153,6 +168,11 @@ public void readFrom(StreamInput in) throws IOException { // verbose mode ramTree = readRamTree(in); } + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + segmentSort = readSegmentSort(in); + } else { + segmentSort = null; + } } @Override @@ -167,12 +187,106 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(compound); out.writeOptionalString(mergeId); out.writeLong(memoryInBytes); - + boolean verbose = ramTree != null; out.writeBoolean(verbose); if (verbose) { writeRamTree(out, ramTree); } + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + writeSegmentSort(out, segmentSort); + } + } + + Sort readSegmentSort(StreamInput in) throws IOException { + int size = in.readVInt(); + if (size == 0) { + return null; + } + SortField[] fields = new SortField[size]; + for (int i = 0; i < size; i++) { + String field = in.readString(); + byte type = 
in.readByte(); + if (type == 0) { + Boolean missingFirst = in.readOptionalBoolean(); + boolean max = in.readBoolean(); + boolean reverse = in.readBoolean(); + fields[i] = new SortedSetSortField(field, reverse, + max ? SortedSetSelector.Type.MAX : SortedSetSelector.Type.MIN); + if (missingFirst != null) { + fields[i].setMissingValue(missingFirst ? + SortedSetSortField.STRING_FIRST : SortedSetSortField.STRING_LAST); + } + } else { + Object missing = in.readGenericValue(); + boolean max = in.readBoolean(); + boolean reverse = in.readBoolean(); + final SortField.Type numericType; + switch (type) { + case 1: + numericType = SortField.Type.INT; + break; + case 2: + numericType = SortField.Type.FLOAT; + break; + case 3: + numericType = SortField.Type.DOUBLE; + break; + case 4: + numericType = SortField.Type.LONG; + break; + default: + throw new IOException("invalid index sort type:[" + type + + "] for numeric field:[" + field + "]"); + } + fields[i] = new SortedNumericSortField(field, numericType, reverse, max ? + SortedNumericSelector.Type.MAX : SortedNumericSelector.Type.MIN); + if (missing != null) { + fields[i].setMissingValue(missing); + } + } + } + return new Sort(fields); + } + + void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { + if (sort == null) { + out.writeVInt(0); + return; + } + out.writeVInt(sort.getSort().length); + for (SortField field : sort.getSort()) { + out.writeString(field.getField()); + if (field instanceof SortedSetSortField) { + out.writeByte((byte) 0); + out.writeOptionalBoolean(field.getMissingValue() == null ? 
+ null : field.getMissingValue() == SortField.STRING_FIRST); + out.writeBoolean(((SortedSetSortField) field).getSelector() == SortedSetSelector.Type.MAX); + out.writeBoolean(field.getReverse()); + } else if (field instanceof SortedNumericSortField) { + switch (((SortedNumericSortField) field).getNumericType()) { + case INT: + out.writeByte((byte) 1); + break; + case FLOAT: + out.writeByte((byte) 2); + break; + case DOUBLE: + out.writeByte((byte) 3); + break; + case LONG: + out.writeByte((byte) 4); + break; + default: + throw new IOException("invalid index sort field:" + field); + } + out.writeGenericValue(field.getMissingValue()); + out.writeBoolean(((SortedNumericSortField) field).getSelector() == SortedNumericSelector.Type.MAX); + out.writeBoolean(field.getReverse()); + } else { + throw new IOException("invalid index sort field:" + field + ""); + } + } } Accountable readRamTree(StreamInput in) throws IOException { @@ -188,7 +302,7 @@ Accountable readRamTree(StreamInput in) throws IOException { } return Accountables.namedAccountable(name, children, bytes); } - + // the ram tree is written recursively since the depth is fairly low (5 or 6) void writeRamTree(StreamOutput out, Accountable tree) throws IOException { out.writeString(tree.toString()); @@ -214,6 +328,7 @@ public String toString() { ", compound=" + compound + ", mergeId='" + mergeId + '\'' + ", memoryInBytes=" + memoryInBytes + + (segmentSort != null ? 
", sort=" + segmentSort : "") + '}'; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 68983bcf63ff4..55cfebe41c1db 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.query.QueryShardContext; @@ -438,6 +439,7 @@ private synchronized Map internalMerge(@Nullable Documen checkNestedFieldsLimit(fullPathObjectMappers); checkDepthLimit(fullPathObjectMappers.keySet()); } + checkIndexSortCompatibility(indexSettings.getIndexSortConfig(), hasNested); for (Map.Entry entry : mappers.entrySet()) { if (entry.getKey().equals(DEFAULT_MAPPING)) { @@ -619,6 +621,12 @@ private void checkPartitionedIndexConstraints(DocumentMapper newMapper) { } } + private void checkIndexSortCompatibility(IndexSortConfig sortConfig, boolean hasNested) { + if (sortConfig.hasIndexSort() && hasNested) { + throw new IllegalArgumentException("cannot have nested fields when index sort is activated"); + } + } + public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException { return documentParser.parse(mappingType, mappingSource, applyDefault ? 
defaultMappingSource : null); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 1dee58ced002b..d1ca4f13a42da 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.Sort; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Lock; @@ -145,6 +146,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { @@ -170,7 +172,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final TranslogConfig translogConfig; private final IndexEventListener indexEventListener; private final QueryCachingPolicy cachingPolicy; - + private final Supplier indexSortSupplier; /** * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. 
IndexingMemoryController polls this @@ -225,9 +227,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl */ private final RefreshListeners refreshListeners; - public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, - MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, - @Nullable EngineFactory engineFactory, + public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, + Supplier indexSortSupplier, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, + IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory, IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, ThreadPool threadPool, BigArrays bigArrays, Engine.Warmer warmer, Runnable globalCheckpointSyncer, List searchOperationListener, List listeners) throws IOException { super(shardRouting.shardId(), indexSettings); @@ -241,6 +243,7 @@ public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardP Objects.requireNonNull(store, "Store must be provided to the index shard"); this.engineFactory = engineFactory == null ? new InternalEngineFactory() : engineFactory; this.store = store; + this.indexSortSupplier = indexSortSupplier; this.indexEventListener = indexEventListener; this.threadPool = threadPool; this.mapperService = mapperService; @@ -289,6 +292,12 @@ public Store store() { return this.store; } + /** + * Return the sort order of this index, or null if the index has no sort. + */ + public Sort getIndexSort() { + return indexSortSupplier.get(); + } /** * returns true if this shard supports indexing (i.e., write) operations. 
*/ @@ -569,12 +578,21 @@ private Engine.IndexResult index(Engine engine, Engine.Index index) throws IOExc return result; } + public Engine.NoOp prepareMarkingSeqNoAsNoOp(long seqNo, String reason) { + verifyReplicationTarget(); + long startTime = System.nanoTime(); + return new Engine.NoOp(seqNo, primaryTerm, Engine.Operation.Origin.REPLICA, startTime, reason); + } + + public Engine.NoOpResult markSeqNoAsNoOp(Engine.NoOp noOp) throws IOException { + ensureWriteAllowed(noOp); + Engine engine = getEngine(); + return engine.noOp(noOp); + } + public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) { verifyPrimary(); - final DocumentMapper documentMapper = docMapper(type).getDocumentMapper(); - final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType(); - final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null); - final Term uid = MappedFieldType.extractTerm(uidQuery); + final Term uid = extractUid(type, id); return prepareDelete(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, Engine.Operation.Origin.PRIMARY); } @@ -582,15 +600,12 @@ public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version public Engine.Delete prepareDeleteOnReplica(String type, String id, long seqNo, long primaryTerm, long version, VersionType versionType) { verifyReplicationTarget(); - final DocumentMapper documentMapper = docMapper(type).getDocumentMapper(); - final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType(); - final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null); - final Term uid = MappedFieldType.extractTerm(uidQuery); + final Term uid = extractUid(type, id); return prepareDelete(type, id, uid, seqNo, primaryTerm, version, versionType, Engine.Operation.Origin.REPLICA); } - static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, - 
VersionType versionType, Engine.Operation.Origin origin) { + private static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, + VersionType versionType, Engine.Operation.Origin origin) { long startTime = System.nanoTime(); return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime); } @@ -601,6 +616,13 @@ public Engine.DeleteResult delete(Engine.Delete delete) throws IOException { return delete(engine, delete); } + private Term extractUid(String type, String id) { + final DocumentMapper documentMapper = docMapper(type).getDocumentMapper(); + final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType(); + final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null); + return MappedFieldType.extractTerm(uidQuery); + } + private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException { active.set(true); final Engine.DeleteResult result; @@ -1775,11 +1797,12 @@ private DocumentMapperForType docMapper(String type) { private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode, long maxUnsafeAutoIdTimestamp) { final IndexShardRecoveryPerformer translogRecoveryPerformer = new IndexShardRecoveryPerformer(shardId, mapperService, logger); + Sort indexSort = indexSortSupplier.get(); return new EngineConfig(openMode, shardId, threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners, - maxUnsafeAutoIdTimestamp); + maxUnsafeAutoIdTimestamp, indexSort); } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java 
b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 04c2113dea34b..6cfaca8c45b4b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.Sort; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; @@ -109,11 +110,14 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate mappingUpdateConsumer.accept(mapping.key, mapping.value); } indexShard.mapperService().merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + // now that the mapping is merged we can validate the index sort configuration. + Sort indexSort = indexShard.getIndexSort(); return executeRecovery(indexShard, () -> { logger.debug("starting recovery from local shards {}", shards); try { final Directory directory = indexShard.store().directory(); // don't close this directory!! - addIndices(indexShard.recoveryState().getIndex(), directory, shards.stream().map(s -> s.getSnapshotDirectory()) + addIndices(indexShard.recoveryState().getIndex(), directory, indexSort, + shards.stream().map(s -> s.getSnapshotDirectory()) .collect(Collectors.toList()).toArray(new Directory[shards.size()])); internalRecoverFromStore(indexShard); // just trigger a merge to do housekeeping on the @@ -128,16 +132,19 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate return false; } - void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Directory... sources) throws IOException { + void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Sort indexSort, Directory... 
sources) throws IOException { target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); - try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats), - new IndexWriterConfig(null) - .setCommitOnClose(false) - // we don't want merges to happen here - we call maybe merge on the engine - // later once we stared it up otherwise we would need to wait for it here - // we also don't specify a codec here and merges should use the engines for this index - .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) { + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we started it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.CREATE); + if (indexSort != null) { + iwc.setIndexSort(indexSort); + } + try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats), iwc)) { writer.addIndexes(sources); writer.commit(); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index d5aadc1664ea4..8842cbf3c0bd4 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException; import
org.elasticsearch.index.mapper.DocumentMapperForType; @@ -31,7 +30,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.rest.RestStatus; @@ -182,7 +180,7 @@ private void performRecoveryOperation(Engine engine, Translog.Operation operatio final String reason = noOp.reason(); logger.trace("[translog] recover [no_op] op [({}, {})] of [{}]", seqNo, primaryTerm, reason); final Engine.NoOp engineNoOp = - new Engine.NoOp(null, seqNo, primaryTerm, 0, VersionType.INTERNAL, origin, System.nanoTime(), reason); + new Engine.NoOp(seqNo, primaryTerm, origin, System.nanoTime(), reason); noOp(engine, engineNoOp); break; default: diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java new file mode 100644 index 0000000000000..be22679a4e1bf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; + +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; + +public interface BucketMetricValue extends NumericMetricsAggregation.SingleValue { + + String[] keys(); +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java index 76284d275553f..9c9da2f26bd53 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java @@ -31,7 +31,7 @@ import java.util.List; import java.util.Map; -public class InternalBucketMetricValue extends InternalNumericMetricsAggregation.SingleValue { +public class InternalBucketMetricValue extends InternalNumericMetricsAggregation.SingleValue implements BucketMetricValue { public static final String NAME = "bucket_metric_value"; private double value; @@ -72,6 +72,7 @@ public double value() { return value; } + @Override public String[] keys() { return keys; } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 9ab6551d6fd07..dea04d1710685 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -19,9 +19,17 @@ package org.elasticsearch.action.admin.indices.create; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSelector; +import 
org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.segments.IndexSegments; +import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterInfoService; @@ -33,6 +41,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -44,6 +53,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; public class ShrinkIndexIT extends ESIntegTestCase { @@ -250,4 +260,76 @@ public void testCreateShrinkIndexFails() throws Exception { ensureGreen(); assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); } + + public void testCreateShrinkWithIndexSort() throws Exception { + SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX); + expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST); + Sort expectedIndexSort = new Sort(expectedSortField); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source") + .setSettings( + 
Settings.builder() + .put(indexSettings()) + .put("sort.field", "id") + .put("sort.order", "desc") + .put("number_of_shards", 8) + .put("number_of_replicas", 0) + ) + .addMapping("t1", "id", "type=keyword,doc_values=true") + .get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source", "t1", Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); + } + ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() + .getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + + flushAndRefresh(); + assertSortedSegments("source", expectedIndexSort); + + // relocate all shards to one node such that we can merge it. 
+ client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true)).get(); + ensureGreen(); + + // check that index sort cannot be set on the target index + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareShrinkIndex("source", "target") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .put("index.sort.field", "foo") + .build()).get()); + assertThat(exc.getMessage(), containsString("can't override index sort when shrinking index")); + + // check that the index sort order of `source` is correctly applied to the `target` + assertAcked(client().admin().indices().prepareShrinkIndex("source", "target") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2").build()).get()); + ensureGreen(); + flushAndRefresh(); + GetSettingsResponse settingsResponse = + client().admin().indices().prepareGetSettings("target").execute().actionGet(); + assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); + assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); + assertSortedSegments("target", expectedIndexSort); + + // ... 
and that the index sort is also applied to updates + for (int i = 20; i < 40; i++) { + client().prepareIndex("target", randomFrom("t1", "t2", "t3")) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + flushAndRefresh(); + assertSortedSegments("target", expectedIndexSort); + } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index f6e7d29bd5815..c11d7daf98911 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.TransportShardBulkAction.ReplicaItemExecutionMode; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -46,10 +47,12 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.rest.RestStatus; +import org.mockito.ArgumentCaptor; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; +import static org.elasticsearch.action.bulk.TransportShardBulkAction.replicaItemExecutionMode; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.containsString; @@ -85,26 +88,38 @@ public void testShouldExecuteReplicaItem() throws Exception { DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean()); BulkItemRequest request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, response)); - 
assertTrue(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); + assertThat(replicaItemExecutionMode(request, 0), + equalTo(ReplicaItemExecutionMode.NORMAL)); - // Failed index requests should not be replicated (for now!) + // Failed index requests without sequence no should not be replicated writeRequest = new IndexRequest("index", "type", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); - response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean()); request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse( new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, new BulkItemResponse.Failure("index", "type", "id", new IllegalArgumentException("i died")))); - assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); + assertThat(replicaItemExecutionMode(request, 0), + equalTo(ReplicaItemExecutionMode.NOOP)); + // Failed index requests with sequence no should be replicated + request = new BulkItemRequest(0, writeRequest); + request.setPrimaryResponse( + new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, + new BulkItemResponse.Failure("index", "type", "id", + new IllegalArgumentException( + "i died after sequence no was generated"), + 1))); + assertThat(replicaItemExecutionMode(request, 0), + equalTo(ReplicaItemExecutionMode.FAILURE)); // NOOP requests should not be replicated writeRequest = new UpdateRequest("index", "type", "id"); response = new UpdateResponse(shardId, "type", "id", 1, DocWriteResponse.Result.NOOP); request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.UPDATE, response)); - assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); + assertThat(replicaItemExecutionMode(request, 0), + equalTo(ReplicaItemExecutionMode.NOOP)); } @@ -506,6 +521,35 @@ public void testCalculateTranslogLocation() throws Exception { } + public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception { + 
final IndexShard shard = spy(newStartedShard(false)); + BulkItemRequest itemRequest = new BulkItemRequest(0, + new IndexRequest("index", "type") + .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") + ); + final String failureMessage = "simulated primary failure"; + itemRequest.setPrimaryResponse(new BulkItemResponse(0, + randomFrom( + DocWriteRequest.OpType.CREATE, + DocWriteRequest.OpType.DELETE, + DocWriteRequest.OpType.INDEX + ), + new BulkItemResponse.Failure("index", "type", "1", + new IOException(failureMessage), 1L) + )); + BulkItemRequest[] itemRequests = new BulkItemRequest[1]; + itemRequests[0] = itemRequest; + BulkShardRequest bulkShardRequest = new BulkShardRequest( + shard.shardId(), RefreshPolicy.NONE, itemRequests); + TransportShardBulkAction.performOnReplica(bulkShardRequest, shard); + ArgumentCaptor noOp = ArgumentCaptor.forClass(Engine.NoOp.class); + verify(shard, times(1)).markSeqNoAsNoOp(noOp.capture()); + final Engine.NoOp noOpValue = noOp.getValue(); + assertThat(noOpValue.seqNo(), equalTo(1L)); + assertThat(noOpValue.reason(), containsString(failureMessage)); + closeShards(shard); + } + public void testMappingUpdateParsesCorrectNumberOfTimes() throws Exception { IndexMetaData metaData = indexMetaData(); logger.info("--> metadata.getIndex(): {}", metaData.getIndex()); diff --git a/core/src/test/java/org/elasticsearch/index/IndexSortIT.java b/core/src/test/java/org/elasticsearch/index/IndexSortIT.java new file mode 100644 index 0000000000000..bb59bc948805c --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/IndexSortIT.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class IndexSortIT extends ESIntegTestCase { + private static final XContentBuilder TEST_MAPPING = createTestMapping(); + + private static XContentBuilder createTestMapping() { + try { + return jsonBuilder() + .startObject() + .startObject("properties") + .startObject("date") + .field("type", "date") + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .endObject().endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + public void testIndexSort() { + SortField dateSort = new SortedNumericSortField("date", SortField.Type.LONG, false); + 
dateSort.setMissingValue(Long.MAX_VALUE); + SortField numericSort = new SortedNumericSortField("numeric_dv", SortField.Type.LONG, false); + numericSort.setMissingValue(Long.MAX_VALUE); + SortField keywordSort = new SortedSetSortField("keyword_dv", false); + keywordSort.setMissingValue(SortField.STRING_LAST); + Sort indexSort = new Sort(dateSort, numericSort, keywordSort); + prepareCreate("test") + .setSettings(Settings.builder() + .put(indexSettings()) + .put("index.number_of_shards", "1") + .put("index.number_of_replicas", "1") + .putArray("index.sort.field", "date", "numeric_dv", "keyword_dv") + ) + .addMapping("test", TEST_MAPPING) + .get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("test", "test", Integer.toString(i)) + .setSource("numeric_dv", randomInt(), "keyword_dv", randomAlphaOfLengthBetween(10, 20)) + .get(); + } + flushAndRefresh(); + ensureYellow(); + assertSortedSegments("test", indexSort); + } + + public void testInvalidIndexSort() { + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> prepareCreate("test") + .setSettings(Settings.builder() + .put(indexSettings()) + .putArray("index.sort.field", "invalid_field") + ) + .addMapping("test", TEST_MAPPING) + .get() + ); + assertThat(exc.getMessage(), containsString("unknown index sort field:[invalid_field]")); + + exc = expectThrows(IllegalArgumentException.class, + () -> prepareCreate("test") + .setSettings(Settings.builder() + .put(indexSettings()) + .putArray("index.sort.field", "numeric") + ) + .addMapping("test", TEST_MAPPING) + .get() + ); + assertThat(exc.getMessage(), containsString("docvalues not found for index sort field:[numeric]")); + + exc = expectThrows(IllegalArgumentException.class, + () -> prepareCreate("test") + .setSettings(Settings.builder() + .put(indexSettings()) + .putArray("index.sort.field", "keyword") + ) + .addMapping("test", TEST_MAPPING) + .get() + ); + assertThat(exc.getMessage(), containsString("docvalues not found for index 
sort field:[keyword]")); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java new file mode 100644 index 0000000000000..af3fdf9adbc24 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.MultiValueMode; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; +import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class IndexSortSettingsTests extends ESTestCase { + private static IndexSettings indexSettings(Settings settings) { + return indexSettings(settings, null); + } + + private static IndexSettings indexSettings(Settings settings, Version version) { + final Settings newSettings; + if (version != null) { + newSettings = Settings.builder() + .put(settings) + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .build(); + } else { + newSettings = settings; + } + return new IndexSettings(newIndexMeta("test", newSettings), Settings.EMPTY); + } + + public void testNoIndexSort() throws IOException { + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertFalse(indexSettings.getIndexSortConfig().hasIndexSort()); + } + + public void testSimpleIndexSort() throws IOException { + Settings settings = Settings.builder() + .put("index.sort.field", "field1") + .put("index.sort.order", "asc") + .put("index.sort.mode", "max") + .put("index.sort.missing", "_last") + .build(); + IndexSettings indexSettings = indexSettings(settings); + IndexSortConfig config = indexSettings.getIndexSortConfig(); + assertTrue(config.hasIndexSort()); + assertThat(config.sortSpecs.length, equalTo(1)); + + assertThat(config.sortSpecs[0].field, equalTo("field1")); + assertThat(config.sortSpecs[0].order, equalTo(SortOrder.ASC)); + assertThat(config.sortSpecs[0].missingValue, 
equalTo("_last")); + assertThat(config.sortSpecs[0].mode, equalTo(MultiValueMode.MAX)); + } + + public void testIndexSortWithArrays() throws IOException { + Settings settings = Settings.builder() + .putArray("index.sort.field", "field1", "field2") + .putArray("index.sort.order", "asc", "desc") + .putArray("index.sort.missing", "_last", "_first") + .build(); + IndexSettings indexSettings = indexSettings(settings); + IndexSortConfig config = indexSettings.getIndexSortConfig(); + assertTrue(config.hasIndexSort()); + assertThat(config.sortSpecs.length, equalTo(2)); + + assertThat(config.sortSpecs[0].field, equalTo("field1")); + assertThat(config.sortSpecs[1].field, equalTo("field2")); + assertThat(config.sortSpecs[0].order, equalTo(SortOrder.ASC)); + assertThat(config.sortSpecs[1].order, equalTo(SortOrder.DESC)); + assertThat(config.sortSpecs[0].missingValue, equalTo("_last")); + assertThat(config.sortSpecs[1].missingValue, equalTo("_first")); + assertNull(config.sortSpecs[0].mode); + assertNull(config.sortSpecs[1].mode); + } + + public void testInvalidIndexSort() throws IOException { + final Settings settings = Settings.builder() + .put("index.sort.field", "field1") + .put("index.sort.order", "asc, desc") + .build(); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); + assertThat(exc.getMessage(), containsString("index.sort.field:[field1] index.sort.order:[asc, desc], size mismatch")); + } + + public void testInvalidIndexSortWithArray() throws IOException { + final Settings settings = Settings.builder() + .put("index.sort.field", "field1") + .putArray("index.sort.order", new String[] {"asc", "desc"}) + .build(); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); + assertThat(exc.getMessage(), + containsString("index.sort.field:[field1] index.sort.order:[asc, desc], size mismatch")); + } + + public void testInvalidOrder() throws IOException { + 
final Settings settings = Settings.builder() + .put("index.sort.field", "field1") + .put("index.sort.order", "invalid") + .build(); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); + assertThat(exc.getMessage(), containsString("Illegal sort order:invalid")); + } + + public void testInvalidMode() throws IOException { + final Settings settings = Settings.builder() + .put("index.sort.field", "field1") + .put("index.sort.mode", "invalid") + .build(); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); + assertThat(exc.getMessage(), containsString("Illegal sort mode: invalid")); + } + + public void testInvalidMissing() throws IOException { + final Settings settings = Settings.builder() + .put("index.sort.field", "field1") + .put("index.sort.missing", "default") + .build(); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> indexSettings(settings)); + assertThat(exc.getMessage(), containsString("Illegal missing value:[default]," + + " must be one of [_last, _first]")); + } + + public void testInvalidVersion() throws IOException { + final Settings settings = Settings.builder() + .put("index.sort.field", "field1") + .build(); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> indexSettings(settings, Version.V_5_4_0_UNRELEASED)); + assertThat(exc.getMessage(), + containsString("unsupported index.version.created:5.4.0, " + + "can't set index.sort on versions prior to 6.0.0-alpha1")); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 71d754ddfb6ca..a5bdf5c39641c 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -58,6 +58,8 @@ import 
org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -260,7 +262,7 @@ public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, An config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), analyzer, config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), - config.getMaxUnsafeAutoIdTimestamp()); + config.getMaxUnsafeAutoIdTimestamp(), config.getIndexSort()); } @Override @@ -358,7 +360,18 @@ protected InternalEngine createEngine( MergePolicy mergePolicy, @Nullable IndexWriterFactory indexWriterFactory, @Nullable Supplier sequenceNumbersServiceSupplier) throws IOException { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); + return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, sequenceNumbersServiceSupplier, null); + } + + protected InternalEngine createEngine( + IndexSettings indexSettings, + Store store, + Path translogPath, + MergePolicy mergePolicy, + @Nullable IndexWriterFactory indexWriterFactory, + @Nullable Supplier sequenceNumbersServiceSupplier, + @Nullable Sort indexSort) throws IOException { + EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null, indexSort); InternalEngine internalEngine = createInternalEngine(indexWriterFactory, sequenceNumbersServiceSupplier, config); if (config.getOpenMode() == 
EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { internalEngine.recoverFromTranslog(); @@ -393,12 +406,24 @@ public SequenceNumbersService seqNoService() { public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener) { return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(), - maxUnsafeAutoIdTimestamp, refreshListener); + maxUnsafeAutoIdTimestamp, refreshListener, null); + } + + public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, + long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener, Sort indexSort) { + return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(), + maxUnsafeAutoIdTimestamp, refreshListener, indexSort); } public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, SnapshotDeletionPolicy deletionPolicy, long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener) { + return config(indexSettings, store, translogPath, mergePolicy, deletionPolicy, maxUnsafeAutoIdTimestamp, refreshListener, null); + } + + public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, + SnapshotDeletionPolicy deletionPolicy, long maxUnsafeAutoIdTimestamp, + ReferenceManager.RefreshListener refreshListener, Sort indexSort) { IndexWriterConfig iwc = newIndexWriterConfig(); TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); final EngineConfig.OpenMode openMode; @@ -421,7 +446,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, new TranslogHandler(xContentRegistry(), 
shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListener, - maxUnsafeAutoIdTimestamp); + maxUnsafeAutoIdTimestamp, indexSort); return config; } @@ -636,6 +661,37 @@ public void testSegmentsWithMergeFlag() throws Exception { } } + public void testSegmentsWithIndexSort() throws Exception { + Sort indexSort = new Sort(new SortedSetSortField("_type", false)); + try (Store store = createStore(); + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, + null, null, indexSort)) { + List segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(1)); + assertThat(segments.get(0).getSegmentSort(), equalTo(indexSort)); + + ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null); + engine.index(indexForDoc(doc3)); + engine.refresh("test"); + + segments = engine.segments(true); + assertThat(segments.size(), equalTo(3)); + assertThat(segments.get(0).getSegmentSort(), equalTo(indexSort)); + assertThat(segments.get(1).getSegmentSort(), equalTo(indexSort)); + assertThat(segments.get(2).getSegmentSort(), equalTo(indexSort)); + } + } + public void testSegmentsStatsIncludingFileSizes() throws Exception { try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { @@ -2680,7 +2736,7 @@ public void testRecoverFromForeignTranslog() throws IOException { config.getIndexSettings(), null, store, 
createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), config.getRefreshListeners(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); + TimeValue.timeValueMinutes(5), config.getRefreshListeners(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); @@ -2857,10 +2913,13 @@ public void testHandleDocumentFailure() throws Exception { } Engine.IndexResult indexResult = engine.index(indexForDoc(doc1)); assertNotNull(indexResult.getFailure()); - + // document failures should be recorded in translog + assertNotNull(indexResult.getTranslogLocation()); throwingIndexWriter.get().clearFailure(); indexResult = engine.index(indexForDoc(doc1)); assertNull(indexResult.getFailure()); + // document failures should be recorded in translog + assertNotNull(indexResult.getTranslogLocation()); engine.index(indexForDoc(doc2)); // test failure while deleting @@ -3672,12 +3731,9 @@ public long generateSeqNo() { final String reason = randomAlphaOfLength(16); noOpEngine.noOp( new Engine.NoOp( - null, - maxSeqNo + 1, + maxSeqNo + 1, primaryTerm, - 0, - VersionType.INTERNAL, - randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY), + randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY), System.nanoTime(), reason)); assertThat(noOpEngine.seqNoService().getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); diff --git a/core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java b/core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java new file mode 100644 index 0000000000000..9ee0a343b95e5 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/engine/SegmentTests.java @@ -0,0 +1,114 @@ +/* + * Licensed 
to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortedNumericSelector; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.search.SortedSetSelector; +import org.apache.lucene.search.SortField; +import org.apache.lucene.util.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Objects; + +public class SegmentTests extends ESTestCase { + static SortField randomSortField() { + if (randomBoolean()) { + SortedNumericSortField field = + new SortedNumericSortField(randomAlphaOfLengthBetween(1, 10), + SortField.Type.INT, + randomBoolean(), + randomBoolean() ? SortedNumericSelector.Type.MAX : SortedNumericSelector.Type.MIN); + if (randomBoolean()) { + field.setMissingValue(randomInt()); + } + return field; + } else { + SortedSetSortField field = + new SortedSetSortField(randomAlphaOfLengthBetween(1, 10), + randomBoolean(), + randomBoolean() ? 
SortedSetSelector.Type.MAX : SortedSetSelector.Type.MIN); + if (randomBoolean()) { + field.setMissingValue(randomBoolean() ? SortedSetSortField.STRING_FIRST : SortedSetSortField.STRING_LAST); + } + return field; + } + } + + static Sort randomIndexSort() { + if (randomBoolean()) { + return null; + } + int size = randomIntBetween(1, 5); + SortField[] fields = new SortField[size]; + for (int i = 0; i < size; i++) { + fields[i] = randomSortField(); + } + return new Sort(fields); + } + + static Segment randomSegment() { + Segment segment = new Segment(randomAlphaOfLength(10)); + segment.committed = randomBoolean(); + segment.search = randomBoolean(); + segment.sizeInBytes = randomNonNegativeLong(); + segment.docCount = randomIntBetween(1, Integer.MAX_VALUE); + segment.delDocCount = randomIntBetween(0, segment.docCount); + segment.version = Version.LUCENE_6_5_0; + segment.compound = randomBoolean(); + segment.mergeId = randomAlphaOfLengthBetween(1, 10); + segment.memoryInBytes = randomNonNegativeLong(); + segment.segmentSort = randomIndexSort(); + return segment; + } + + public void testSerialization() throws IOException { + for (int i = 0; i < 20; i++) { + Segment segment = randomSegment(); + BytesStreamOutput output = new BytesStreamOutput(); + segment.writeTo(output); + output.flush(); + StreamInput input = output.bytes().streamInput(); + Segment deserialized = new Segment(); + deserialized.readFrom(input); + assertTrue(isSegmentEquals(deserialized, segment)); + } + } + + static boolean isSegmentEquals(Segment seg1, Segment seg2) { + return seg1.docCount == seg2.docCount && + seg1.delDocCount == seg2.delDocCount && + seg1.committed == seg2.committed && + seg1.search == seg2.search && + Objects.equals(seg1.version, seg2.version) && + Objects.equals(seg1.compound, seg2.compound) && + seg1.sizeInBytes == seg2.sizeInBytes && + seg1.memoryInBytes == seg2.memoryInBytes && + seg1.getGeneration() == seg2.getGeneration() && + seg1.getName().equals(seg2.getName()) && + 
seg1.getMergeId().equals(seg2.getMergeId()) && + Objects.equals(seg1.segmentSort, seg2.segmentSort); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 0a6a8f8d46954..7141550a44fcd 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -287,4 +287,26 @@ public void testPartitionedConstraints() { .put("index.routing_partition_size", 2)) .execute().actionGet().isAcknowledged()); } + + public void testIndexSortWithNestedFields() throws IOException { + Settings settings = Settings.builder() + .put("index.sort.field", "_type") + .build(); + IllegalArgumentException invalidNestedException = expectThrows(IllegalArgumentException.class, + () -> createIndex("test", settings, "t", "nested_field", "type=nested")); + assertThat(invalidNestedException.getMessage(), + containsString("cannot have nested fields when index sort is activated")); + IndexService indexService = createIndex("test", settings, "t"); + CompressedXContent nestedFieldMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("nested_field") + .field("type", "nested") + .endObject() + .endObject().endObject().bytes()); + invalidNestedException = expectThrows(IllegalArgumentException.class, + () -> indexService.mapperService().merge("t", nestedFieldMapping, + MergeReason.MAPPING_UPDATE, true)); + assertThat(invalidNestedException.getMessage(), + containsString("cannot have nested fields when index sort is activated")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 2996362735f2d..2243a5769b99a 100644 --- 
a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -22,21 +22,21 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.bulk.BulkItemRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.bulk.BulkShardResponse; +import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkActionTests; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction.ReplicaResponse; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.support.replication.TransportWriteActionTestHelper; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -50,7 +50,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; @@ -58,6 +57,7 @@ 
import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; @@ -77,8 +77,6 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnPrimary; -import static org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnReplica; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -147,9 +145,13 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { public int indexDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", Integer.toString(docId.incrementAndGet())) - .source("{}", XContentType.JSON); - final IndexResponse response = index(indexRequest); - assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); + .source("{}", XContentType.JSON); + final BulkItemResponse response = index(indexRequest); + if (response.isFailed()) { + throw response.getFailure().getCause(); + } else { + assertEquals(DocWriteResponse.Result.CREATED, response.getResponse().getResult()); + } } primary.updateGlobalCheckpointOnPrimary(); return numOfDoc; @@ -158,43 +160,29 @@ public int indexDocs(final int numOfDoc) throws Exception { public int appendDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); - final IndexResponse response = index(indexRequest); - assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); + final BulkItemResponse response = index(indexRequest); + if (response.isFailed()) { 
+ throw response.getFailure().getCause(); + } else if (response.isFailed() == false) { + assertEquals(DocWriteResponse.Result.CREATED, response.getResponse().getResult()); + } } primary.updateGlobalCheckpointOnPrimary(); return numOfDoc; } - public IndexResponse index(IndexRequest indexRequest) throws Exception { - PlainActionFuture listener = new PlainActionFuture<>(); + public BulkItemResponse index(IndexRequest indexRequest) throws Exception { + PlainActionFuture listener = new PlainActionFuture<>(); final ActionListener wrapBulkListener = ActionListener.wrap( - bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0].getResponse()), + bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0]), listener::onFailure); BulkItemRequest[] items = new BulkItemRequest[1]; - items[0] = new TestBulkItemRequest(0, indexRequest); + items[0] = new BulkItemRequest(0, indexRequest); BulkShardRequest request = new BulkShardRequest(shardId, indexRequest.getRefreshPolicy(), items); new IndexingAction(request, wrapBulkListener, this).execute(); return listener.get(); } - /** BulkItemRequest exposing get/set primary response */ - public class TestBulkItemRequest extends BulkItemRequest { - - TestBulkItemRequest(int id, DocWriteRequest request) { - super(id, request); - } - - @Override - protected void setPrimaryResponse(BulkItemResponse primaryResponse) { - super.setPrimaryResponse(primaryResponse); - } - - @Override - protected BulkItemResponse getPrimaryResponse() { - return super.getPrimaryResponse(); - } - } - public synchronized void startAll() throws IOException { startReplicas(replicas.size()); } @@ -442,7 +430,7 @@ protected Set getInSyncAllocationIds(ShardId shardId, ClusterState clust protected abstract PrimaryResult performOnPrimary(IndexShard primary, Request request) throws Exception; - protected abstract void performOnReplica(ReplicaRequest request, IndexShard replica) throws IOException; + protected abstract void 
performOnReplica(ReplicaRequest request, IndexShard replica) throws Exception; class PrimaryRef implements ReplicationOperation.Primary { @@ -539,47 +527,53 @@ class IndexingAction extends ReplicationAction result = executeShardBulkOnPrimary(primary, request); + return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful); } @Override - protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws IOException { - final ReplicationGroup.TestBulkItemRequest bulkItemRequest = ((ReplicationGroup.TestBulkItemRequest) request.items()[0]); - final DocWriteResponse primaryResponse = bulkItemRequest.getPrimaryResponse().getResponse(); - indexOnReplica(primaryResponse, ((IndexRequest) bulkItemRequest.request()), replica); + protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { + executeShardBulkOnReplica(replica, request); + } + } + + private TransportWriteAction.WritePrimaryResult executeShardBulkOnPrimary(IndexShard primary, BulkShardRequest request) throws Exception { + for (BulkItemRequest itemRequest : request.items()) { + if (itemRequest.request() instanceof IndexRequest) { + ((IndexRequest) itemRequest.request()).process(null, index.getName()); + } } + final TransportWriteAction.WritePrimaryResult result = + TransportShardBulkAction.performOnPrimary(request, primary, null, + System::currentTimeMillis, new TransportShardBulkActionTests.NoopMappingUpdatePerformer()); + request.primaryTerm(primary.getPrimaryTerm()); + TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.location, logger); + return result; + } + + private void executeShardBulkOnReplica(IndexShard replica, BulkShardRequest request) throws Exception { + final Translog.Location location = TransportShardBulkAction.performOnReplica(request, replica); + TransportWriteActionTestHelper.performPostWriteActions(replica, request, location, logger); } /** * indexes the given requests on the supplied 
primary, modifying it for replicas */ - protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception { - final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, - new TransportShardBulkActionTests.NoopMappingUpdatePerformer()); - request.primaryTerm(primary.getPrimaryTerm()); - TransportWriteActionTestHelper.performPostWriteActions(primary, request, indexResult.getTranslogLocation(), logger); - return new IndexResponse( - primary.shardId(), - request.type(), - request.id(), - indexResult.getSeqNo(), - primary.getPrimaryTerm(), - indexResult.getVersion(), - indexResult.isCreated()); + BulkShardRequest indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception { + final BulkItemRequest bulkItemRequest = new BulkItemRequest(0, request); + BulkItemRequest[] bulkItemRequests = new BulkItemRequest[1]; + bulkItemRequests[0] = bulkItemRequest; + final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(), bulkItemRequests); + final TransportWriteAction.WritePrimaryResult result = + executeShardBulkOnPrimary(primary, bulkShardRequest); + return result.replicaRequest(); } /** * indexes the given requests on the supplied replica shard */ - protected void indexOnReplica(DocWriteResponse response, IndexRequest request, IndexShard replica) throws IOException { - final Engine.IndexResult result = executeIndexRequestOnReplica(response, request, replica); - TransportWriteActionTestHelper.performPostWriteActions(replica, request, result.getTranslogLocation(), logger); + void indexOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { + executeShardBulkOnReplica(replica, request); } class GlobalCheckpointSync extends ReplicationAction future = shards.asyncRecoverReplica(replica, (indexShard, node) - -> new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) { + -> new RecoveryTarget(indexShard, node, recoveryListener, version -> { 
+ }) { @Override public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { super.cleanFiles(totalTranslogOps, sourceMetaData); @@ -113,8 +122,8 @@ public void testInheritMaxValidAutoIDTimestampOnRecovery() throws Exception { shards.startAll(); final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); indexRequest.onRetry(); // force an update of the timestamp - final IndexResponse response = shards.index(indexRequest); - assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); + final BulkItemResponse response = shards.index(indexRequest); + assertEquals(DocWriteResponse.Result.CREATED, response.getResponse().getResult()); if (randomBoolean()) { // lets check if that also happens if no translog record is replicated shards.flush(); } @@ -147,7 +156,7 @@ public void testCheckpointsAdvance() throws Exception { final SeqNoStats shardStats = shard.seqNoStats(); final ShardRouting shardRouting = shard.routingEntry(); logger.debug("seq_no stats for {}: {}", shardRouting, XContentHelper.toString(shardStats, - new ToXContent.MapParams(Collections.singletonMap("pretty", "false")))); + new ToXContent.MapParams(Collections.singletonMap("pretty", "false")))); assertThat(shardRouting + " local checkpoint mismatch", shardStats.getLocalCheckpoint(), equalTo(numDocs - 1L)); assertThat(shardRouting + " global checkpoint mismatch", shardStats.getGlobalCheckpoint(), equalTo(numDocs - 1L)); @@ -158,7 +167,7 @@ public void testCheckpointsAdvance() throws Exception { public void testConflictingOpsOnReplica() throws Exception { Map mappings = - Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}"); + Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}"); try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(2, mappings))) { shards.startAll(); IndexShard replica1 = 
shards.getReplicas().get(0); @@ -180,4 +189,128 @@ public void testConflictingOpsOnReplica() throws Exception { } } } + + /** + * test document failures (failures after seq_no generation) are added as noop operation to the translog + * for primary and replica shards + */ + public void testDocumentFailureReplication() throws Exception { + final String failureMessage = "simulated document failure"; + final ThrowingDocumentFailureEngineFactory throwingDocumentFailureEngineFactory = + new ThrowingDocumentFailureEngineFactory(failureMessage); + try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(0)) { + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return throwingDocumentFailureEngineFactory; + }}) { + + // test only primary + shards.startPrimary(); + BulkItemResponse response = shards.index( + new IndexRequest(index.getName(), "testDocumentFailureReplication", "1") + .source("{}", XContentType.JSON) + ); + assertTrue(response.isFailed()); + assertNoOpTranslogOperationForDocumentFailure(shards, 1, failureMessage); + shards.assertAllEqual(0); + + // add some replicas + int nReplica = randomIntBetween(1, 3); + for (int i = 0; i < nReplica; i++) { + shards.addReplica(); + } + shards.startReplicas(nReplica); + response = shards.index( + new IndexRequest(index.getName(), "testDocumentFailureReplication", "1") + .source("{}", XContentType.JSON) + ); + assertTrue(response.isFailed()); + assertNoOpTranslogOperationForDocumentFailure(shards, 2, failureMessage); + shards.assertAllEqual(0); + } + } + + /** + * test request failures (failures before seq_no generation) are not added as a noop to translog + */ + public void testRequestFailureReplication() throws Exception { + try (ReplicationGroup shards = createGroup(0)) { + shards.startAll(); + BulkItemResponse response = shards.index( + new IndexRequest(index.getName(), "testRequestFailureException", "1") + .source("{}", XContentType.JSON) + .version(2) + ); + 
assertTrue(response.isFailed()); + assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); + shards.assertAllEqual(0); + for (IndexShard indexShard : shards) { + try(Translog.View view = indexShard.acquireTranslogView()) { + assertThat(view.totalOperations(), equalTo(0)); + } + } + + // add some replicas + int nReplica = randomIntBetween(1, 3); + for (int i = 0; i < nReplica; i++) { + shards.addReplica(); + } + shards.startReplicas(nReplica); + response = shards.index( + new IndexRequest(index.getName(), "testRequestFailureException", "1") + .source("{}", XContentType.JSON) + .version(2) + ); + assertTrue(response.isFailed()); + assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); + shards.assertAllEqual(0); + for (IndexShard indexShard : shards) { + try(Translog.View view = indexShard.acquireTranslogView()) { + assertThat(view.totalOperations(), equalTo(0)); + } + } + } + } + + /** Throws documentFailure on every indexing operation */ + static class ThrowingDocumentFailureEngineFactory implements EngineFactory { + final String documentFailureMessage; + + ThrowingDocumentFailureEngineFactory(String documentFailureMessage) { + this.documentFailureMessage = documentFailureMessage; + } + + @Override + public Engine newReadWriteEngine(EngineConfig config) { + return InternalEngineTests.createInternalEngine((directory, writerConfig) -> + new IndexWriter(directory, writerConfig) { + @Override + public long addDocument(Iterable doc) throws IOException { + assert documentFailureMessage != null; + throw new IOException(documentFailureMessage); + } + }, null, config); + } + } + + private static void assertNoOpTranslogOperationForDocumentFailure( + Iterable replicationGroup, + int expectedOperation, + String failureMessage) throws IOException { + for (IndexShard indexShard : replicationGroup) { + try(Translog.View view = indexShard.acquireTranslogView()) { + assertThat(view.totalOperations(), 
equalTo(expectedOperation)); + final Translog.Snapshot snapshot = view.snapshot(); + long expectedSeqNo = 0L; + Translog.Operation op = snapshot.next(); + do { + assertThat(op.opType(), equalTo(Translog.Operation.Type.NO_OP)); + assertThat(op.seqNo(), equalTo(expectedSeqNo)); + assertThat(((Translog.NoOp) op).reason(), containsString(failureMessage)); + op = snapshot.next(); + expectedSeqNo++; + } while (op != null); + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 12f749e681918..139c7f500d8d7 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -24,9 +24,9 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.index.engine.Engine; @@ -168,8 +168,8 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { for (int i = 0; i < rollbackDocs; i++) { final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "rollback_" + i) .source("{}", XContentType.JSON); - final IndexResponse primaryResponse = indexOnPrimary(indexRequest, oldPrimary); - indexOnReplica(primaryResponse, indexRequest, replica); + final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); + indexOnReplica(bulkShardRequest, replica); } if (randomBoolean()) { oldPrimary.flush(new 
FlushRequest(index.getName())); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index fec0b766d3490..e68ee0758fc24 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -531,7 +531,7 @@ public static final IndexShard newIndexShard(IndexService indexService, IndexSh IndexingOperationListener... listeners) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), - shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), + shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexService.getThreadPool(), indexService.getBigArrays(), null, () -> {}, Collections.emptyList(), Arrays.asList(listeners)); return newShard; diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index b7e20cf75c83c..3e5a34c3921fe 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -123,7 +123,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, translogHandler, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - 
TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); + TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); engine = new InternalEngine(config); listeners.setTranslog(engine.getTranslog()); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java index f31733dc47723..dc7d620a97b37 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java @@ -20,6 +20,8 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -27,6 +29,10 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -47,13 +53,27 @@ public class StoreRecoveryTests extends ESTestCase { public void testAddIndices() throws IOException { Directory[] dirs = new Directory[randomIntBetween(1, 10)]; final int numDocs = randomIntBetween(50, 100); + final Sort indexSort; + if (randomBoolean()) { + indexSort = new Sort(new SortedNumericSortField("num", SortField.Type.LONG, true)); + } else { + indexSort = null; + } int id = 0; for (int i = 0; i < dirs.length; i++) { dirs[i] = newFSDirectory(createTempDir()); - IndexWriter writer = new 
IndexWriter(dirs[i], newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(IndexWriterConfig.OpenMode.CREATE)); + IndexWriterConfig iwc = newIndexWriterConfig() + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.CREATE); + if (indexSort != null) { + iwc.setIndexSort(indexSort); + } + IndexWriter writer = new IndexWriter(dirs[i], iwc); for (int j = 0; j < numDocs; j++) { - writer.addDocument(Arrays.asList(new StringField("id", Integer.toString(id++), Field.Store.YES))); + writer.addDocument(Arrays.asList( + new StringField("id", Integer.toString(id++), Field.Store.YES), + new SortedNumericDocValuesField("num", randomLong()) + )); } writer.commit(); @@ -62,7 +82,7 @@ public void testAddIndices() throws IOException { StoreRecovery storeRecovery = new StoreRecovery(new ShardId("foo", "bar", 1), logger); RecoveryState.Index indexStats = new RecoveryState.Index(); Directory target = newFSDirectory(createTempDir()); - storeRecovery.addIndices(indexStats, target, dirs); + storeRecovery.addIndices(indexStats, target, indexSort, dirs); int numFiles = 0; Predicate filesFilter = (f) -> f.startsWith("segments") == false && f.equals("write.lock") == false && f.startsWith("extra") == false; @@ -80,7 +100,11 @@ public void testAddIndices() throws IOException { DirectoryReader reader = DirectoryReader.open(target); SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target); for (SegmentCommitInfo info : segmentCommitInfos) { // check that we didn't merge - assertEquals("all sources must be flush", info.info.getDiagnostics().get("source"), "flush"); + assertEquals("all sources must be flush", + info.info.getDiagnostics().get("source"), "flush"); + if (indexSort != null) { + assertEquals(indexSort, info.info.getIndexSort()); + } } assertEquals(reader.numDeletedDocs(), 0); assertEquals(reader.numDocs(), id); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java 
b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 82af8d0ec1b28..dfb8efb9ab943 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -2247,11 +2247,11 @@ public void testSimpleCommit() throws IOException { final long generation = randomIntBetween(1, Math.toIntExact(translog.currentFileGeneration())); translog.commit(generation); - for (long i = 0; i < generation; i++) { - assertFileDeleted(translog, i); + for (long g = 0; g < generation; g++) { + assertFileDeleted(translog, g); } - for (long i = generation; i <= translog.currentFileGeneration(); i++) { - assertFileIsPresent(translog, i); + for (long g = generation; g <= translog.currentFileGeneration(); g++) { + assertFileIsPresent(translog, g); } } @@ -2271,10 +2271,10 @@ public void testPrepareCommitAndCommit() throws IOException { final int committedGeneration = randomIntBetween(Math.max(1, Math.toIntExact(last)), Math.toIntExact(generation)); translog.commit(committedGeneration); last = committedGeneration; - for (long g = 0; i < generation; g++) { + for (long g = 0; g < committedGeneration; g++) { assertFileDeleted(translog, g); } - for (long g = generation; g < translog.currentFileGeneration(); g++) { + for (long g = committedGeneration; g <= translog.currentFileGeneration(); g++) { assertFileIsPresent(translog, g); } } @@ -2302,7 +2302,7 @@ public void testCommitWithOpenView() throws IOException { } // the view generation could be -1 if no commit has been performed final long max = Math.max(1, Math.min(lastCommittedGeneration, viewGeneration)); - for (long g = max; g < translog.currentFileGeneration(); g++) { + for (long g = max; g <= translog.currentFileGeneration(); g++) { assertFileIsPresent(translog, g); } } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java 
b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index cf22c95ac6997..f3bd58fd38a1c 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -151,6 +151,7 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th when(indexService.mapperService()).thenReturn(mapperService); when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList()); when(indexService.getIndexEventListener()).thenReturn(new IndexEventListener() {}); + when(indexService.getIndexSortSupplier()).thenReturn(() -> null); return indexService; }); } catch (IOException e) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java new file mode 100644 index 0000000000000..0cfa07538e424 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.metrics.percentiles; + +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregationTestCase; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +public abstract class InternalPercentilesTestCase extends InternalAggregationTestCase { + + private double[] percents; + + @Before + public void init() { + percents = randomPercents(); + } + + @Override + protected T createTestInstance(String name, List pipelineAggregators, Map metaData) { + int numValues = randomInt(100); + double[] values = new double[numValues]; + for (int i = 0; i < numValues; ++i) { + values[i] = randomDouble(); + } + return createTestInstance(name, pipelineAggregators, metaData, randomBoolean(), DocValueFormat.RAW, percents, values); + } + + protected abstract T createTestInstance(String name, List pipelineAggregators, Map metaData, + boolean keyed, DocValueFormat format, double[] percents, double[] values); + + private static double[] randomPercents() { + List randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d); + double[] percents = new double[randomCdfValues.size()]; + for (int i = 0; i < randomCdfValues.size(); i++) { + percents[i] = randomCdfValues.get(i); + } + return percents; + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java new file mode 100644 index 0000000000000..bff026d5cf4b0 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one 
or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; + +import org.HdrHistogram.DoubleHistogram; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +public class InternalHDRPercentilesTests extends InternalPercentilesTestCase { + + @Override + protected InternalHDRPercentiles createTestInstance(String name, + List pipelineAggregators, + Map metaData, + boolean keyed, DocValueFormat format, double[] percents, double[] values) { + + final DoubleHistogram state = new DoubleHistogram(3); + Arrays.stream(values).forEach(state::recordValue); + + return new InternalHDRPercentiles(name, percents, state, keyed, format, pipelineAggregators, metaData); + } + + @Override + protected void assertReduced(InternalHDRPercentiles reduced, List inputs) { + // it is hard to check the values due to the inaccuracy of the algorithm + long totalCount = 0; + for (InternalHDRPercentiles ranks : inputs) { + totalCount += 
ranks.state.getTotalCount(); + } + assertEquals(totalCount, reduced.state.getTotalCount()); + } + + @Override + protected Writeable.Reader instanceReader() { + return InternalHDRPercentiles::new; + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java index 75efa516409ae..f2db4a48530ed 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java @@ -21,29 +21,24 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.InternalAggregationTestCase; +import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import java.util.Arrays; import java.util.List; import java.util.Map; -public class InternalTDigestPercentilesTests extends InternalAggregationTestCase { - - private final double[] percents = randomPercents(); +public class InternalTDigestPercentilesTests extends InternalPercentilesTestCase { @Override protected InternalTDigestPercentiles createTestInstance(String name, List pipelineAggregators, - Map metaData) { - boolean keyed = randomBoolean(); - DocValueFormat format = DocValueFormat.RAW; - TDigestState state = new TDigestState(100); + Map metaData, + boolean keyed, DocValueFormat format, double[] percents, double[] values) { + final TDigestState state = new TDigestState(100); + Arrays.stream(values).forEach(state::add); - int numValues = randomInt(10); - for (int i = 0; i < numValues; ++i) { - state.add(randomDouble() * 100); - } - 
assertEquals(state.centroidCount(), numValues); + assertEquals(state.centroidCount(), values.length); return new InternalTDigestPercentiles(name, percents, state, keyed, format, pipelineAggregators, metaData); } @@ -69,13 +64,4 @@ protected void assertReduced(InternalTDigestPercentiles reduced, List instanceReader() { return InternalTDigestPercentiles::new; } - - private static double[] randomPercents() { - List randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d); - double[] percents = new double[randomCdfValues.size()]; - for (int i = 0; i < randomCdfValues.size(); i++) { - percents[i] = randomCdfValues.get(i); - } - return percents; - } } diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index f69df7f51713f..0362f2a6fe3a3 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -169,7 +169,7 @@ Before starting, you need to have: * A http://www.windowsazure.com/[Windows Azure account] * OpenSSL that isn't from MacPorts, specifically `OpenSSL 1.0.1f 6 Jan 2014` doesn't seem to create a valid keypair for ssh. FWIW, - `OpenSSL 1.0.1c 10 May 2012` on Ubuntu 12.04 LTS is known to work. + `OpenSSL 1.0.1c 10 May 2012` on Ubuntu 14.04 LTS is known to work. * SSH keys and certificate + -- diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc new file mode 100644 index 0000000000000..0c2b5c9abe979 --- /dev/null +++ b/docs/reference/index-modules/index-sorting.asciidoc @@ -0,0 +1,107 @@ +[[index-modules-index-sorting]] +== Index Sorting + +experimental[] + +When creating a new index in elasticsearch it is possible to configure how the Segments +inside each Shard will be sorted. By default Lucene does not apply any sort. +The `index.sort.*` settings define which fields should be used to sort the documents inside each Segment. 
+ +[WARNING] +nested fields are not compatible with index sorting because they rely on the assumption +that nested documents are stored in contiguous doc ids, which can be broken by index sorting. +An error will be thrown if index sorting is activated on an index that contains nested fields. + +For instance the following example shows how to define a sort on a single field: + +[source,js] +-------------------------------------------------- +PUT twitter +{ + "settings" : { + "index" : { + "sort.field" : "date", <1> + "sort.order" : "desc" <2> + } + }, + "mappings": { + "tweet": { + "properties": { + "date": { + "type": "date" + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> This index is sorted by the `date` field +<2> ... in descending order. + +It is also possible to sort the index by more than one field: + +[source,js] +-------------------------------------------------- +PUT twitter +{ + "settings" : { + "index" : { + "sort.field" : ["username", "date"], <1> + "sort.order" : ["asc", "desc"] <2> + } + }, + "mappings": { + "tweet": { + "properties": { + "username": { + "type": "keyword", + "doc_values": true + }, + "date": { + "type": "date" + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> This index is sorted by `username` first then by `date` +<2> ... in ascending order for the `username` field and in descending order for the `date` field. + + +Index sorting supports the following settings: + +`index.sort.field`:: + + The list of fields used to sort the index. + Only `boolean`, `numeric`, `date` and `keyword` fields with `doc_values` are allowed here. + +`index.sort.order`:: + + The sort order to use for each field. + The order option can have the following values: + * `asc`: For ascending order + * `desc`: For descending order. + +`index.sort.mode`:: + + Elasticsearch supports sorting by multi-valued fields. + The mode option controls what value is picked to sort the document. 
+ The mode option can have the following values: + * `min`: Pick the lowest value. + * `max`: Pick the highest value. + +`index.sort.missing`:: + + The missing parameter specifies how docs which are missing the field should be treated. + The missing value can have the following values: + * `_last`: Documents without value for the field are sorted last. + * `_first`: Documents without value for the field are sorted first. + +[WARNING] +Index sorting can be defined only once at index creation. It is not allowed to add or update +a sort on an existing index. diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 9f919a2802ded..f195ee1f2fd75 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1511,15 +1511,72 @@ Converts a JSON string into a structured JSON object. | `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. 
|====== +Suppose you provide this configuration of the `json` processor: + +[source,js] +-------------------------------------------------- +{ + "json" : { + "field" : "string_source", + "target_field" : "json_target" + } +} +-------------------------------------------------- + +If the following document is processed: + +[source,js] +-------------------------------------------------- +{ + "string_source": "{\"foo\": 2000}" +} +-------------------------------------------------- + +after the `json` processor operates on it, it will look like: + [source,js] -------------------------------------------------- { - "json": { - "field": "{\"foo\": 2000}" + "string_source": "{\"foo\": 2000}", + "json_target": { + "foo": 2000 } } -------------------------------------------------- +If the following configuration is provided, omitting the optional `target_field` setting: +[source,js] +-------------------------------------------------- +{ + "json" : { + "field" : "source_and_target" + } +} +-------------------------------------------------- + +then after the `json` processor operates on this document: + +[source,js] +-------------------------------------------------- +{ + "source_and_target": "{\"foo\": 2000}" +} +-------------------------------------------------- + +it will look like: + +[source,js] +-------------------------------------------------- +{ + "source_and_target": { + "foo": 2000 + } +} +-------------------------------------------------- + +This illustrates that, unless it is explicitly named in the processor configuration, the `target_field` +is the same field provided in the required `field` configuration. + [[kv-processor]] === KV Processor This processor helps automatically parse messages (or specific event fields) which are of the foo=bar variety. 
diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index 815606fb7bd3d..45839ac55d950 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -6,7 +6,7 @@ contains any value other than `null`. This field is used by the <> query to find documents that either have or don't have any non-+null+ value for a particular field. -The value of the `_field_name` field is accessible in queries: +The value of the `_field_names` field is accessible in queries: [source,js] -------------------------- @@ -34,4 +34,4 @@ GET my_index/_search -------------------------- // CONSOLE -<1> Querying on the `_field_names` field (also see the <> query) \ No newline at end of file +<1> Querying on the `_field_names` field (also see the <> query) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 0a41d0f465354..95923070e924b 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -235,8 +235,7 @@ For example, bind-mounting a `custom_elasticsearch.yml` with `docker run` can be -------------------------------------------- -v full_path_to/custom_elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -------------------------------------------- - -IMPORTANT: `custom_elasticsearch.yml` should be readable by uid:gid `1000:1000` +IMPORTANT: The container **runs Elasticsearch as user `elasticsearch` using uid:gid `1000:1000`**. Bind mounted host directories and files, such as `custom_elasticsearch.yml` above, **need to be accessible by this user**. For the https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#path-settings[data and log dirs], such as `/usr/share/elasticsearch/data`, write access is required as well. ===== C. 
Customized image In some environments, it may make more sense to prepare a custom image containing your configuration. A `Dockerfile` to achieve this may be as simple as: @@ -274,6 +273,8 @@ We have collected a number of best practices for production use. NOTE: Any Docker parameters mentioned below assume the use of `docker run`. +. Elasticsearch inside the container runs as user `elasticsearch` using uid:gid `1000:1000`. If you are bind mounting a local directory or file, ensure it is readable by this user while the https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#path-settings[data and log dirs] additionally require write access. + . It is important to correctly set capabilities and ulimits via the Docker CLI. As seen earlier in the example <>, the following options are required: + --cap-add=IPC_LOCK --ulimit memlock=-1:-1 --ulimit nofile=65536:65536 diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java index 378cca7f58fb2..e723081e36c0c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java @@ -43,7 +43,8 @@ public final class CompilerSettings { public static final String PICKY = "picky"; /** - * For testing: do not use. + * Hack to set the initial "depth" for the {@link DefBootstrap.PIC} and {@link DefBootstrap.MIC}. Only used for testing: do not + * overwrite. 
*/ public static final String INITIAL_CALL_SITE_DEPTH = "initialCallSiteDepth"; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java index 69b40f141e2a2..5fc41c8c63038 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java @@ -77,8 +77,8 @@ private void arrayLoadStoreTestCase(boolean declareAsDef, String valueType, Obje } private void expectOutOfBounds(int index, String script, Object val) { - IndexOutOfBoundsException e = expectScriptThrows(IndexOutOfBoundsException.class, - () -> exec(script, singletonMap("val", val), true)); + IndexOutOfBoundsException e = expectScriptThrows(IndexOutOfBoundsException.class, () -> + exec(script, singletonMap("val", val), true)); try { assertThat(e.getMessage(), outOfBoundsExceptionMessageMatcher(index, 5)); } catch (AssertionError ae) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java index ef2ddad5452d0..97e1f01fdfc94 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java @@ -186,7 +186,7 @@ public void testNullSafeDeref() { assertNull( exec("def a = null; return a?.toString()")); assertEquals("foo", exec("def a = 'foo'; return a?.toString()")); // Call with primitive result - assertMustBeNullable( "String a = null; return a?.length()"); + assertMustBeNullable( "String a = null; return a?.length()"); assertMustBeNullable( "String a = 'foo'; return a?.length()"); assertNull( exec("def a = null; return a?.length()")); assertEquals(3, exec("def a = 'foo'; return 
a?.length()")); @@ -265,7 +265,7 @@ public void testNullSafeDeref() { } private void assertMustBeNullable(String script) { - Exception e = expectScriptThrows(IllegalArgumentException.class , () -> exec(script)); + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> exec(script)); assertEquals("Result of null safe operator must be nullable", e.getMessage()); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java index fe95e8c8c2316..c3861add319dd 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java @@ -325,7 +325,7 @@ public interface NoArgumentsConstant { Object execute(String foo); } public void testNoArgumentsConstant() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(NoArgumentsConstant.class, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + "names of the method arguments but [" + NoArgumentsConstant.class.getName() + "] doesn't have one.")); @@ -336,7 +336,7 @@ public interface WrongArgumentsConstant { Object execute(String foo); } public void testWrongArgumentsConstant() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(WrongArgumentsConstant.class, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + "names of the method arguments but [" + WrongArgumentsConstant.class.getName() + "] 
doesn't have one.")); @@ -347,7 +347,7 @@ public interface WrongLengthOfArgumentConstant { Object execute(String foo); } public void testWrongLengthOfArgumentConstant() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(WrongLengthOfArgumentConstant.class, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith("[" + WrongLengthOfArgumentConstant.class.getName() + "#ARGUMENTS] has length [2] but [" + WrongLengthOfArgumentConstant.class.getName() + "#execute] takes [1] argument.")); @@ -358,7 +358,7 @@ public interface UnknownArgType { Object execute(UnknownArgType foo); } public void testUnknownArgType() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(UnknownArgType.class, null, "1", emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgType.class.getName() + ". 
Painless interfaces can only accept arguments " + "that are of whitelisted types.", e.getMessage()); @@ -369,7 +369,7 @@ public interface UnknownReturnType { UnknownReturnType execute(String foo); } public void testUnknownReturnType() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(UnknownReturnType.class, null, "1", emptyMap())); assertEquals("Painless can only implement execute methods returning a whitelisted type but [" + UnknownReturnType.class.getName() + "#execute] returns [" + UnknownReturnType.class.getName() + "] which isn't whitelisted.", e.getMessage()); @@ -380,7 +380,7 @@ public interface UnknownArgTypeInArray { Object execute(UnknownArgTypeInArray[] foo); } public void testUnknownArgTypeInArray() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(UnknownArgTypeInArray.class, null, "1", emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgTypeInArray.class.getName() + ". 
Painless interfaces can only accept " + "arguments that are of whitelisted types.", e.getMessage()); @@ -391,7 +391,7 @@ public interface TwoExecuteMethods { Object execute(boolean foo); } public void testTwoExecuteMethods() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(TwoExecuteMethods.class, null, "null", emptyMap())); assertEquals("Painless can only implement interfaces that have a single method named [execute] but [" + TwoExecuteMethods.class.getName() + "] has more than one.", e.getMessage()); @@ -401,7 +401,7 @@ public interface BadMethod { Object something(); } public void testBadMethod() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(BadMethod.class, null, "null", emptyMap())); assertEquals("Painless can only implement methods named [execute] and [uses$argName] but [" + BadMethod.class.getName() + "] contains a method named [something]", e.getMessage()); @@ -413,7 +413,7 @@ public interface BadUsesReturn { Object uses$foo(); } public void testBadUsesReturn() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(BadUsesReturn.class, null, "null", emptyMap())); assertEquals("Painless can only implement uses$ methods that return boolean but [" + BadUsesReturn.class.getName() + "#uses$foo] returns [java.lang.Object].", e.getMessage()); @@ -425,7 +425,7 @@ public interface BadUsesParameter { boolean uses$bar(boolean foo); } public void testBadUsesParameter() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(BadUsesParameter.class, null, "null", emptyMap())); 
assertEquals("Painless can only implement uses$ methods that do not take parameters but [" + BadUsesParameter.class.getName() + "#uses$bar] does.", e.getMessage()); @@ -437,7 +437,7 @@ public interface BadUsesName { boolean uses$baz(); } public void testBadUsesName() { - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(BadUsesName.class, null, "null", emptyMap())); assertEquals("Painless can only implement uses$ methods that match a parameter name but [" + BadUsesName.class.getName() + "#uses$baz] doesn't match any of [foo, bar].", e.getMessage()); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java index bce70a080dbe6..bcb92a527d9e6 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java @@ -204,7 +204,7 @@ public void testNestedCaptureParams() { public void testWrongArity() { assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9); - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { exec("Optional.empty().orElseGet(x -> x);"); }); assertTrue(expected.getMessage().contains("Incorrect number of parameters")); @@ -220,7 +220,7 @@ public void testWrongArityDef() { public void testWrongArityNotEnough() { assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9); - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { exec("List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(() -> 
5).sum();"); }); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java index 83a592b3f2632..92ff9ef3c9334 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java @@ -26,10 +26,8 @@ import java.util.Arrays; import java.util.HashSet; import java.util.regex.Pattern; -import java.util.regex.PatternSyntaxException; import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.containsString; public class RegexTests extends ScriptTestCase { @Override @@ -264,8 +262,9 @@ public void testBadRegexPattern() { assertEquals("Error compiling regex: Illegal Unicode escape sequence", e.getCause().getMessage()); // And make sure the location of the error points to the offset inside the pattern - assertEquals("/\\ujjjj/", e.getScriptStack().get(0)); - assertEquals(" ^---- HERE", e.getScriptStack().get(1)); + assertScriptStack(e, + "/\\ujjjj/", + " ^---- HERE"); } public void testRegexAgainstNumber() { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 74c6c9a5628f0..1ab5aa14508ce 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -35,6 +35,8 @@ import java.util.HashMap; import java.util.Map; +import static org.hamcrest.Matchers.hasSize; + /** * Base test case for scripting unit tests. *

@@ -114,10 +116,29 @@ public void assertBytecodeHasPattern(String script, String pattern) { /** Checks a specific exception class is thrown (boxed inside ScriptException) and returns it. */ public static T expectScriptThrows(Class expectedType, ThrowingRunnable runnable) { + return expectScriptThrows(expectedType, true, runnable); + } + + /** Checks a specific exception class is thrown (boxed inside ScriptException) and returns it. */ + public static T expectScriptThrows(Class expectedType, boolean shouldHaveScriptStack, + ThrowingRunnable runnable) { try { runnable.run(); } catch (Throwable e) { if (e instanceof ScriptException) { + boolean hasEmptyScriptStack = ((ScriptException) e).getScriptStack().isEmpty(); + if (shouldHaveScriptStack && hasEmptyScriptStack) { + if (0 != e.getCause().getStackTrace().length) { + // Without -XX:-OmitStackTraceInFastThrow the jvm can eat the stack trace which causes us to ignore script_stack + AssertionFailedError assertion = new AssertionFailedError("ScriptException should have a scriptStack"); + assertion.initCause(e); + throw assertion; + } + } else if (false == shouldHaveScriptStack && false == hasEmptyScriptStack) { + AssertionFailedError assertion = new AssertionFailedError("ScriptException shouldn't have a scriptStack"); + assertion.initCause(e); + throw assertion; + } e = e.getCause(); if (expectedType.isInstance(e)) { return expectedType.cast(e); @@ -134,4 +155,21 @@ public static T expectScriptThrows(Class expectedType, } throw new AssertionFailedError("Expected exception " + expectedType.getSimpleName()); } + + /** + * Asserts that the script_stack looks right. + */ + public static void assertScriptStack(ScriptException e, String... 
stack) { + // This particular incantation of assertions makes the error messages more useful + try { + assertThat(e.getScriptStack(), hasSize(stack.length)); + for (int i = 0; i < stack.length; i++) { + assertEquals(stack[i], e.getScriptStack().get(i)); + } + } catch (AssertionError assertion) { + assertion.initCause(e); + throw assertion; + } + } + } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java index da4558a693a0d..2888eca3db4fa 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java @@ -165,12 +165,12 @@ public void testStringAndCharacter() { assertEquals('c', exec("String s = \"c\"; (char)s")); assertEquals('c', exec("String s = 'c'; (char)s")); - ClassCastException expected = expectScriptThrows(ClassCastException.class, () -> { + ClassCastException expected = expectScriptThrows(ClassCastException.class, false, () -> { assertEquals("cc", exec("return (String)(char)\"cc\"")); }); assertTrue(expected.getMessage().contains("Cannot cast [String] with length greater than one to [char].")); - expected = expectScriptThrows(ClassCastException.class, () -> { + expected = expectScriptThrows(ClassCastException.class, false, () -> { assertEquals("cc", exec("return (String)(char)'cc'")); }); assertTrue(expected.getMessage().contains("Cannot cast [String] with length greater than one to [char].")); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index aaa337ae821ba..d60da7b795fbc 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ 
-19,7 +19,10 @@ package org.elasticsearch.painless; +import junit.framework.AssertionFailedError; + import org.apache.lucene.util.Constants; +import org.elasticsearch.script.ScriptException; import java.lang.invoke.WrongMethodTypeException; import java.util.Arrays; @@ -27,52 +30,93 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.instanceOf; public class WhenThingsGoWrongTests extends ScriptTestCase { public void testNullPointer() { expectScriptThrows(NullPointerException.class, () -> { exec("int x = params['missing']; return x;"); }); + expectScriptThrows(NullPointerException.class, () -> { + exec("Double.parseDouble(params['missing'])"); + }); } - /** test "line numbers" in the bytecode, which are really 1-based offsets */ - public void testLineNumbers() { - // trigger NPE at line 1 of the script - NullPointerException exception = expectScriptThrows(NullPointerException.class, () -> { - exec("String x = null; boolean y = x.isEmpty();\n" + - "return y;"); - }); - // null deref at x.isEmpty(), the '.' is offset 30 (+1) - assertEquals(30 + 1, exception.getStackTrace()[0].getLineNumber()); + /** + * Test that the scriptStack looks good. By implication this tests that we build proper "line numbers" in stack trace. These line + * numbers are really 1 based character numbers. + */ + public void testScriptStack() { + for (String type : new String[] {"String", "def "}) { + // trigger NPE at line 1 of the script + ScriptException exception = expectThrows(ScriptException.class, () -> { + exec(type + " x = null; boolean y = x.isEmpty();\n" + + "return y;"); + }); + // null deref at x.isEmpty(), the '.' 
is offset 30 + assertScriptElementColumn(30, exception); + assertScriptStack(exception, + "y = x.isEmpty();\n", + " ^---- HERE"); + assertThat(exception.getCause(), instanceOf(NullPointerException.class)); + + // trigger NPE at line 2 of the script + exception = expectThrows(ScriptException.class, () -> { + exec(type + " x = null;\n" + + "return x.isEmpty();"); + }); + // null deref at x.isEmpty(), the '.' is offset 25 + assertScriptElementColumn(25, exception); + assertScriptStack(exception, + "return x.isEmpty();", + " ^---- HERE"); + assertThat(exception.getCause(), instanceOf(NullPointerException.class)); + + // trigger NPE at line 3 of the script + exception = expectThrows(ScriptException.class, () -> { + exec(type + " x = null;\n" + + type + " y = x;\n" + + "return y.isEmpty();"); + }); + // null deref at y.isEmpty(), the '.' is offset 39 + assertScriptElementColumn(39, exception); + assertScriptStack(exception, + "return y.isEmpty();", + " ^---- HERE"); + assertThat(exception.getCause(), instanceOf(NullPointerException.class)); + + // trigger NPE at line 4 in script (inside conditional) + exception = expectThrows(ScriptException.class, () -> { + exec(type + " x = null;\n" + + "boolean y = false;\n" + + "if (!y) {\n" + + " y = x.isEmpty();\n" + + "}\n" + + "return y;"); + }); + // null deref at x.isEmpty(), the '.' is offset 53 + assertScriptElementColumn(53, exception); + assertScriptStack(exception, + "y = x.isEmpty();\n}\n", + " ^---- HERE"); + assertThat(exception.getCause(), instanceOf(NullPointerException.class)); + } + } - // trigger NPE at line 2 of the script - exception = expectScriptThrows(NullPointerException.class, () -> { - exec("String x = null;\n" + - "return x.isEmpty();"); - }); - // null deref at x.isEmpty(), the '.' 
is offset 25 (+1) - assertEquals(25 + 1, exception.getStackTrace()[0].getLineNumber()); - - // trigger NPE at line 3 of the script - exception = expectScriptThrows(NullPointerException.class, () -> { - exec("String x = null;\n" + - "String y = x;\n" + - "return y.isEmpty();"); - }); - // null deref at y.isEmpty(), the '.' is offset 39 (+1) - assertEquals(39 + 1, exception.getStackTrace()[0].getLineNumber()); - - // trigger NPE at line 4 in script (inside conditional) - exception = expectScriptThrows(NullPointerException.class, () -> { - exec("String x = null;\n" + - "boolean y = false;\n" + - "if (!y) {\n" + - " y = x.isEmpty();\n" + - "}\n" + - "return y;"); - }); - // null deref at x.isEmpty(), the '.' is offset 53 (+1) - assertEquals(53 + 1, exception.getStackTrace()[0].getLineNumber()); + private void assertScriptElementColumn(int expectedColumn, ScriptException exception) { + StackTraceElement[] stackTrace = exception.getCause().getStackTrace(); + for (int i = 0; i < stackTrace.length; i++) { + if (WriterConstants.CLASS_NAME.equals(stackTrace[i].getClassName())) { + if (expectedColumn + 1 != stackTrace[i].getLineNumber()) { + AssertionFailedError assertion = new AssertionFailedError("Expected column to be [" + expectedColumn + "] but was [" + + stackTrace[i].getLineNumber() + "]"); + assertion.initCause(exception); + throw assertion; + } + return; + } + } + fail("didn't find script stack element"); } public void testInvalidShift() { @@ -161,7 +205,7 @@ public void testSourceLimits() { final char[] tooManyChars = new char[Compiler.MAXIMUM_SOURCE_LENGTH + 1]; Arrays.fill(tooManyChars, '0'); - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { exec(new String(tooManyChars)); }); assertTrue(expected.getMessage().contains("Scripts may be no longer than")); @@ -282,5 +326,4 @@ public void testRegularUnexpectedCharacter() 
{ e = expectScriptThrows(IllegalArgumentException.class, () -> exec("'cat", false)); assertEquals("unexpected character ['cat].", e.getMessage()); } - } diff --git a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java index f0be775306725..6ef40a7778236 100644 --- a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -41,6 +41,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.anyOf; @@ -76,7 +77,7 @@ private int indexDocs(String index, final int idStart, final int numDocs) throws for (int i = 0; i < numDocs; i++) { final int id = idStart + i; assertOK(client().performRequest("PUT", index + "/test/" + id, emptyMap(), - new StringEntity("{\"test\": \"test_" + id + "\"}", ContentType.APPLICATION_JSON))); + new StringEntity("{\"test\": \"test_" + randomAsciiOfLength(2) + "\"}", ContentType.APPLICATION_JSON))); } return numDocs; } @@ -116,7 +117,7 @@ public void testIndexVersionPropagation() throws Exception { .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) .put("index.routing.allocation.include._name", bwcNames); - final String index = "test"; + final String index = "indexversionprop"; final int minUpdates = 5; final int maxUpdates = 10; createIndex(index, settings.build()); @@ -130,7 +131,9 @@ public void testIndexVersionPropagation() throws Exception { updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(); assertOK(client().performRequest("POST", index + "/_refresh")); - List shards = buildShards(nodes, 
newNodeClient); + List shards = buildShards(index, nodes, newNodeClient); + Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); + logger.info("primary resolved to: " + primary.getNode().getNodeName()); for (Shard shard : shards) { assertVersion(index, 1, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc1); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 1); @@ -140,13 +143,15 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing docs with [{}] concurrent updates after allowing shards on all nodes", nUpdates); final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(nodes, newNodeClient); + shards = buildShards(index, nodes, newNodeClient); + primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); + logger.info("primary resolved to: " + primary.getNode().getNodeName()); for (Shard shard : shards) { assertVersion(index, 2, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc2); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 2); } - Shard primary = buildShards(nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); + primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); ensureGreen(); @@ -154,7 +159,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing docs with [{}] concurrent updates after moving primary", nUpdates); final int finalVersionForDoc3 = indexDocWithConcurrentUpdates(index, 3, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(nodes, 
newNodeClient); + shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 3); @@ -167,7 +172,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 0", nUpdates); final int finalVersionForDoc4 = indexDocWithConcurrentUpdates(index, 4, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(nodes, newNodeClient); + shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 4); @@ -180,7 +185,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 1", nUpdates); final int finalVersionForDoc5 = indexDocWithConcurrentUpdates(index, 5, nUpdates); assertOK(client().performRequest("POST", index + "/_refresh")); - shards = buildShards(nodes, newNodeClient); + shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5); assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 5); @@ -216,7 +221,7 @@ public void testSeqNoCheckpoints() throws Exception { final int numberOfInitialDocs = 1 + randomInt(5); logger.info("indexing [{}] docs initially", numberOfInitialDocs); numDocs += indexDocs(index, 0, numberOfInitialDocs); - assertSeqNoOnShards(nodes, checkGlobalCheckpoints, 0, newNodeClient); + assertSeqNoOnShards(index, nodes, checkGlobalCheckpoints, 0, newNodeClient); logger.info("allowing shards on all nodes"); updateIndexSetting(index, 
Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(); @@ -227,8 +232,8 @@ public void testSeqNoCheckpoints() throws Exception { final int numberOfDocsAfterAllowingShardsOnAllNodes = 1 + randomInt(5); logger.info("indexing [{}] docs after allowing shards on all nodes", numberOfDocsAfterAllowingShardsOnAllNodes); numDocs += indexDocs(index, numDocs, numberOfDocsAfterAllowingShardsOnAllNodes); - assertSeqNoOnShards(nodes, checkGlobalCheckpoints, 0, newNodeClient); - Shard primary = buildShards(nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); + assertSeqNoOnShards(index, nodes, checkGlobalCheckpoints, 0, newNodeClient); + Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); ensureGreen(); @@ -237,7 +242,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("indexing [{}] docs after moving primary", numberOfDocsAfterMovingPrimary); numDocsOnNewPrimary += indexDocs(index, numDocs, numberOfDocsAfterMovingPrimary); numDocs += numberOfDocsAfterMovingPrimary; - assertSeqNoOnShards(nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); + assertSeqNoOnShards(index, nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); /* * Dropping the number of replicas to zero, and then increasing it to one triggers a recovery thus exercising any BWC-logic in * the recovery code. 
@@ -255,7 +260,7 @@ public void testSeqNoCheckpoints() throws Exception { // the number of documents on the primary and on the recovered replica should match the number of indexed documents assertCount(index, "_primary", numDocs); assertCount(index, "_replica", numDocs); - assertSeqNoOnShards(nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); + assertSeqNoOnShards(index, nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient); } } @@ -274,10 +279,11 @@ private void assertVersion(final String index, final int docId, final String pre assertThat("version mismatch for doc [" + docId + "] preference [" + preference + "]", actualVersion, equalTo(expectedVersion)); } - private void assertSeqNoOnShards(Nodes nodes, boolean checkGlobalCheckpoints, int numDocs, RestClient client) throws Exception { + private void assertSeqNoOnShards(String index, Nodes nodes, boolean checkGlobalCheckpoints, int numDocs, RestClient client) + throws Exception { assertBusy(() -> { try { - List shards = buildShards(nodes, client); + List shards = buildShards(index, nodes, client); Shard primaryShard = shards.stream().filter(Shard::isPrimary).findFirst().get(); assertNotNull("failed to find primary shard", primaryShard); final long expectedGlobalCkp; @@ -311,9 +317,9 @@ private void assertSeqNoOnShards(Nodes nodes, boolean checkGlobalCheckpoints, in }); } - private List buildShards(Nodes nodes, RestClient client) throws IOException { - Response response = client.performRequest("GET", "test/_stats", singletonMap("level", "shards")); - List shardStats = ObjectPath.createFromResponse(response).evaluate("indices.test.shards.0"); + private List buildShards(String index, Nodes nodes, RestClient client) throws IOException { + Response response = client.performRequest("GET", index + "/_stats", singletonMap("level", "shards")); + List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." 
+ index + ".shards.0"); ArrayList shards = new ArrayList<>(); for (Object shard : shardStats) { final String nodeId = ObjectPath.evaluate(shard, "routing.node"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml new file mode 100644 index 0000000000000..705c2d6f2cbe3 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yaml @@ -0,0 +1,75 @@ +--- +"Index Sort": + + - skip: + version: " - 5.99.99" + reason: this uses a new feature that has been added in 6.0.0 + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index.sort.field: rank + mappings: + t: + properties: + rank: + type: integer + + - do: + index: + index: test + type: test + id: "1" + body: { "rank": 4 } + + - do: + index: + index: test + type: test + id: "2" + body: { "rank": 1 } + + - do: + index: + index: test + type: test + id: "3" + body: { "rank": 3 } + + - do: + index: + index: test + type: test + id: "4" + body: { "rank": 2 } + + - do: + indices.refresh: + index: test + + - do: + indices.forcemerge: + index: test + max_num_segments: 1 + + - do: + indices.refresh: + index: test + + - do: + search: + index: test + type: test + body: + sort: _doc + + - match: {hits.total: 4 } + - length: {hits.hits: 4 } + - match: {hits.hits.0._id: "2" } + - match: {hits.hits.1._id: "4" } + - match: {hits.hits.2._id: "3" } + - match: {hits.hits.3._id: "1" } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index abd3f33b5f77b..4062666ddbbf8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -279,7 +279,7 @@ protected IndexShard 
newShard(ShardRouting routing, ShardPath shardPath, IndexMe }); IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, indicesFieldDataCache, new NoneCircuitBreakerService(), mapperService); - indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService, similarityService, + indexShard = new IndexShard(routing, indexSettings, shardPath, store, () ->null, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, indexSearcherWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, globalCheckpointSyncer, Collections.emptyList(), Arrays.asList(listeners)); success = true; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index db15ac0c33533..6d15a5e164ef5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -24,6 +24,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.HttpHost; +import org.apache.lucene.search.Sort; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -45,7 +46,10 @@ import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.segments.IndexSegments; +import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.segments.ShardSegments; import 
org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -111,6 +115,7 @@ import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; @@ -1996,6 +2001,23 @@ public Set assertAllShardsOnNodes(String index, String... pattern) { return nodes; } + + /** + * Asserts that all segments are sorted with the provided {@link Sort}. + */ + public void assertSortedSegments(String indexName, Sort expectedIndexSort) { + IndicesSegmentResponse segmentResponse = + client().admin().indices().prepareSegments(indexName).execute().actionGet(); + IndexSegments indexSegments = segmentResponse.getIndices().get(indexName); + for (IndexShardSegments indexShardSegments : indexSegments.getShards().values()) { + for (ShardSegments shardSegments : indexShardSegments.getShards()) { + for (Segment segment : shardSegments) { + assertThat(expectedIndexSort, equalTo(segment.getSegmentSort())); + } + } + } + } + protected static class NumShards { public final int numPrimaries; public final int numReplicas; From 5156d0ef51f846186c360b3836231f417225e981 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 12:27:05 -0400 Subject: [PATCH 11/17] Fix --- .../org/elasticsearch/action/bulk/TransportShardBulkAction.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 332311a23d00b..44aeb9fad3f28 100644 --- 
a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -288,7 +288,7 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq } catch (Exception failure) { // we may fail translating a update to index or delete operation // we use index result to communicate failure while translating update request - result = new Engine.IndexResult(failure, updateRequest.version()); + result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); break; // out of retry loop } // execute translated update request From 4314fc8f5ca9b0db9aabff8394e9d1140a208292 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 12:29:23 -0400 Subject: [PATCH 12/17] Fix formatting --- .../action/bulk/TransportShardBulkAction.java | 34 ++++++++++++++----- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 44aeb9fad3f28..3484c32671f7f 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -54,6 +54,7 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; @@ -320,9 +321,14 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq case INDEX: assert result instanceof Engine.IndexResult : result.getClass(); IndexRequest updateIndexRequest = translate.action(); - final 
IndexResponse indexResponse = - new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(), result.getSeqNo(), primary.getPrimaryTerm(), - result.getVersion(), ((Engine.IndexResult) result).isCreated()); + final IndexResponse indexResponse = new IndexResponse( + primary.shardId(), + updateIndexRequest.type(), + updateIndexRequest.id(), + result.getSeqNo(), + primary.getPrimaryTerm(), + result.getVersion(), + ((Engine.IndexResult) result).isCreated()); BytesReference indexSourceAsBytes = updateIndexRequest.source(); updateResponse = new UpdateResponse( indexResponse.getShardInfo(), @@ -346,9 +352,14 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq case DELETE: assert result instanceof Engine.DeleteResult : result.getClass(); DeleteRequest updateDeleteRequest = translate.action(); - DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), - updateDeleteRequest.type(), updateDeleteRequest.id(), result.getSeqNo(), primary.getPrimaryTerm(), - result.getVersion(), ((Engine.DeleteResult) result).isFound()); + DeleteResponse deleteResponse = new DeleteResponse( + primary.shardId(), + updateDeleteRequest.type(), + updateDeleteRequest.id(), + result.getSeqNo(), + primary.getPrimaryTerm(), + result.getVersion(), + ((Engine.DeleteResult) result).isFound()); updateResponse = new UpdateResponse( deleteResponse.getShardInfo(), deleteResponse.getShardId(), @@ -358,9 +369,14 @@ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateReq deleteResponse.getPrimaryTerm(), deleteResponse.getVersion(), deleteResponse.getResult()); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, - request.index(), deleteResponse.getVersion(), translate.updatedSourceAsMap(), - translate.updateSourceContentType(), null)); + final GetResult getResult = updateHelper.extractGetResult( + updateRequest, + request.index(), + deleteResponse.getVersion(), + translate.updatedSourceAsMap(), + 
translate.updateSourceContentType(), + null); + updateResponse.setGetResult(getResult); // set translated request as replica request replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest); break; From 67c27a123dba78474de106cdcf49519b657fa3d6 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 12:33:04 -0400 Subject: [PATCH 13/17] Fix --- .../elasticsearch/action/bulk/TransportShardBulkAction.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 3484c32671f7f..30f38230bc94f 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -543,8 +543,7 @@ private static Engine.IndexResult executeIndexRequestOnReplica( try { operation = prepareIndexOperationOnReplica(primaryResponse, request, replica); } catch (MapperParsingException e) { - return new Engine.IndexResult(e, primaryResponse.getVersion() - ); + return new Engine.IndexResult(e, primaryResponse.getVersion(), primaryResponse.getSeqNo()); } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); From e6057c0509d5490c385455f14685513a09fb724c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 13:12:16 -0400 Subject: [PATCH 14/17] More fix --- .../action/bulk/TransportShardBulkActionTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index c11d7daf98911..4f4b9858787ce 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -470,7 +470,7 @@ public void testUpdateReplicaRequestWithSuccess() throws Exception { boolean created = randomBoolean(); Translog.Location resultLocation = new Translog.Location(42, 42, 42); - Engine.IndexResult indexResult = new FakeResult(1, 1, 17, created, resultLocation); + Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation); DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, created); BulkItemResultHolder goodResults = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest); @@ -652,7 +652,7 @@ private static class FakeResult extends Engine.IndexResult { private final Translog.Location location; - protected FakeResult(long version, long seqNo, long primaryTerm, boolean created, Translog.Location location) { + protected FakeResult(long version, long seqNo, boolean created, Translog.Location location) { super(version, seqNo, created); this.location = location; } From 8847aa07926bfc520b37761c1a6da985e1b71bee Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 13:13:23 -0400 Subject: [PATCH 15/17] More usage --- core/src/main/java/org/elasticsearch/index/engine/Engine.java | 4 ++-- .../java/org/elasticsearch/index/engine/InternalEngine.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 7c1bad4fcef3f..122587949e319 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -433,11 +433,11 @@ public boolean isFound() { public static class NoOpResult extends Result { - NoOpResult(long seqNo, long primaryTerm) { + NoOpResult(long seqNo) { super(Operation.TYPE.NO_OP, 0, seqNo); } - NoOpResult(long seqNo, long primaryTerm, Exception failure) { + 
NoOpResult(long seqNo, Exception failure) { super(Operation.TYPE.NO_OP, failure, 0, seqNo); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 2c59869868162..5ce7fee5907dc 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1068,7 +1068,7 @@ public NoOpResult noOp(final NoOp noOp) { try (ReleasableLock ignored = readLock.acquire()) { noOpResult = innerNoOp(noOp); } catch (final Exception e) { - noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm(), e); + noOpResult = new NoOpResult(noOp.seqNo(), e); } return noOpResult; } @@ -1077,7 +1077,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { assert noOp.seqNo() > SequenceNumbersService.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try { - final NoOpResult noOpResult = new NoOpResult(noOp.seqNo(), noOp.primaryTerm()); + final NoOpResult noOpResult = new NoOpResult(noOp.seqNo()); final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); noOpResult.setTook(System.nanoTime() - noOp.startTime()); From b1a20e73f7b81cca76c56560dc4c17b6e0ff73a9 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 13:26:05 -0400 Subject: [PATCH 16/17] Another usage --- .../elasticsearch/gradle/test/ClusterFormationTasks.groovy | 2 +- .../java/org/elasticsearch/index/engine/InternalEngine.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index ea01ba6daf86e..2f4d7c35cc855 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -549,7 +549,7 @@ class ClusterFormationTasks { static Task configureWaitTask(String name, Project project, List nodes, List startTasks) { Task wait = project.tasks.create(name: name, dependsOn: startTasks) wait.doLast { - ant.waitfor(maxwait: '60', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { + ant.waitfor(maxwait: ' ', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { or { for (NodeInfo node : nodes) { resourceexists { diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5ce7fee5907dc..3e5d3453cacf9 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -706,7 +706,7 @@ private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { currentVersion, index.version(), currentNotFoundOrDeleted)) { final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index, currentVersion, currentNotFoundOrDeleted); - plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, index.primaryTerm()); + plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion); } else { plan = IndexingStrategy.processNormally(currentNotFoundOrDeleted, seqNoService().generateSeqNo(), @@ -828,7 +828,7 @@ static IndexingStrategy optimizedAppendOnly(long seqNoForIndexing) { } static IndexingStrategy skipDueToVersionConflict( - VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long primaryTerm) { + VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion) { final IndexResult result = new IndexResult(e, 
currentVersion); return new IndexingStrategy( currentNotFoundOrDeleted, false, false, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); From 9dd40669d18dd5aca207bd28f6a85c7c2e34f335 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Apr 2017 13:37:30 -0400 Subject: [PATCH 17/17] More fixes --- .../elasticsearch/gradle/test/ClusterFormationTasks.groovy | 2 +- .../action/bulk/TransportShardBulkActionTests.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 2f4d7c35cc855..ea01ba6daf86e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -549,7 +549,7 @@ class ClusterFormationTasks { static Task configureWaitTask(String name, Project project, List nodes, List startTasks) { Task wait = project.tasks.create(name: name, dependsOn: startTasks) wait.doLast { - ant.waitfor(maxwait: ' ', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { + ant.waitfor(maxwait: '60', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { or { for (NodeInfo node : nodes) { resourceexists { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 4f4b9858787ce..941cdbf995752 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -394,7 +394,7 @@ public void testUpdateReplicaRequestWithFailure() throws Exception { BulkItemRequest replicaRequest = new 
BulkItemRequest(0, writeRequest); Exception err = new ElasticsearchException("I'm dead <(x.x)>"); - Engine.IndexResult indexResult = new Engine.IndexResult(err, 0); + Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest); @@ -432,7 +432,7 @@ public void testUpdateReplicaRequestWithConflictFailure() throws Exception { Exception err = new VersionConflictEngineException(shardId, "type", "id", "I'm conflicted <(;_;)>"); - Engine.IndexResult indexResult = new Engine.IndexResult(err, 0); + Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest);