diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index b8f50846c0e4b..09f0062352d6e 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -45,18 +45,6 @@
-
-
-
-
-
-
-
-
-
-
-
-
@@ -151,13 +139,6 @@
-
-
-
-
-
-
-
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
index f105b15d6653c..0caae77d7dead 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
@@ -170,7 +170,8 @@ private void executeHealth(final ClusterHealthRequest request, final ActionListe
}
final ClusterState state = clusterService.state();
- final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext());
+ final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService,
+ null, logger, threadPool.getThreadContext());
if (request.timeout().millis() == 0) {
listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
return;
@@ -209,8 +210,8 @@ private boolean validateRequest(final ClusterHealthRequest request, ClusterState
return readyCounter == waitFor;
}
- private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor,
- boolean timedOut) {
+ private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState,
+ final int waitFor, boolean timedOut) {
ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(),
gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime());
int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver);
@@ -325,7 +326,7 @@ private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, Cluste
// one of the specified indices is not there - treat it as RED.
ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY,
clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState),
- pendingTaskTimeInQueue);
+ pendingTaskTimeInQueue);
response.setStatus(ClusterHealthStatus.RED);
return response;
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
index fbee68ab3fcc3..cdef2a03b534c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
@@ -41,8 +41,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
ClusterStatsNodeResponse() {
}
- public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, NodeInfo nodeInfo,
- NodeStats nodeStats, ShardStats[] shardsStats) {
+ public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus,
+ NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) {
super(node);
this.nodeInfo = nodeInfo;
this.nodeStats = nodeStats;
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
index 2c24d2852217e..01ef94c428a41 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
@@ -171,7 +171,8 @@ public SortedTopDocs sortDocs(boolean ignoreFrom, Collection<? extends SearchPha
final TopDocsAndMaxScore td = queryResult.consumeTopDocs();
assert td != null;
topDocsStats.add(td);
- if (td.topDocs.scoreDocs.length > 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet
+ // make sure we set the shard index before we add it - the consumer didn't do that yet
+ if (td.topDocs.scoreDocs.length > 0) {
setShardIndex(td.topDocs, queryResult.getShardIndex());
topDocs.add(td.topDocs);
}
@@ -308,7 +309,8 @@ public IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) {
* completion suggestion ordered by suggestion name
*/
public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase,
- Collection<? extends SearchPhaseResult> fetchResults, IntFunction<SearchPhaseResult> resultsLookup) {
+ Collection<? extends SearchPhaseResult> fetchResults,
+ IntFunction<SearchPhaseResult> resultsLookup) {
if (reducedQueryPhase.isEmptyResult) {
return InternalSearchResponse.empty();
}
@@ -416,7 +418,8 @@ public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResul
* Reduces the given query results and consumes all aggregations and profile results.
* @param queryResults a list of non-null query shard results
*/
- public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, boolean isScrollRequest, boolean trackTotalHits) {
+ public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults,
+ boolean isScrollRequest, boolean trackTotalHits) {
return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(trackTotalHits), 0, isScrollRequest);
}
@@ -441,7 +444,8 @@ private ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResu
Boolean terminatedEarly = null;
if (queryResults.isEmpty()) { // early terminate we have nothing to reduce
return new ReducedQueryPhase(topDocsStats.totalHits, topDocsStats.fetchHits, topDocsStats.maxScore,
- timedOut, terminatedEarly, null, null, null, EMPTY_DOCS, null, null, numReducePhases, false, 0, 0, true);
+ timedOut, terminatedEarly, null, null, null, EMPTY_DOCS, null,
+ null, numReducePhases, false, 0, 0, true);
}
final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult();
final boolean hasSuggest = firstResult.suggest() != null;
@@ -671,7 +675,8 @@ private synchronized void consumeInternal(QuerySearchResult querySearchResult) {
}
if (hasTopDocs) {
TopDocs reducedTopDocs = controller.mergeTopDocs(Arrays.asList(topDocsBuffer),
- querySearchResult.from() + querySearchResult.size() // we have to merge here in the same way we collect on a shard
+ // we have to merge here in the same way we collect on a shard
+ querySearchResult.from() + querySearchResult.size()
, 0);
Arrays.fill(topDocsBuffer, null);
topDocsBuffer[0] = reducedTopDocs;
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java
index fda393e375c9e..938489d6cbedf 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java
@@ -38,7 +38,8 @@
import java.util.List;
import java.util.Set;
-public class MultiTermVectorsRequest extends ActionRequest implements Iterable<TermVectorsRequest>, CompositeIndicesRequest, RealtimeRequest {
+public class MultiTermVectorsRequest extends ActionRequest
+ implements Iterable<TermVectorsRequest>, CompositeIndicesRequest, RealtimeRequest {
String preference;
List<TermVectorsRequest> requests = new ArrayList<>();
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
index 4cd02caf91c32..dc849ca3d1334 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
@@ -616,18 +616,21 @@ public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentP
termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map()));
} else if (FILTER.match(currentFieldName, parser.getDeprecationHandler())) {
termVectorsRequest.filterSettings(readFilterSettings(parser));
- } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { // the following is important for multi request parsing.
+ } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) {
+ // the following is important for multi request parsing.
termVectorsRequest.index = parser.text();
} else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) {
termVectorsRequest.type = parser.text();
} else if (ID.match(currentFieldName, parser.getDeprecationHandler())) {
if (termVectorsRequest.doc != null) {
- throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!");
+ throw new ElasticsearchParseException("failed to parse term vectors request. " +
+ "either [id] or [doc] can be specified, but not both!");
}
termVectorsRequest.id = parser.text();
} else if (DOC.match(currentFieldName, parser.getDeprecationHandler())) {
if (termVectorsRequest.id != null) {
- throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!");
+ throw new ElasticsearchParseException("failed to parse term vectors request. " +
+ "either [id] or [doc] can be specified, but not both!");
}
termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser));
} else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) {
@@ -653,7 +656,8 @@ public static Map<String, String> readPerFieldAnalyzer(Map<String, Object> map)
if (e.getValue() instanceof String) {
mapStrStr.put(e.getKey(), (String) e.getValue());
} else {
- throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass());
+ throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead",
+ e.getKey(), e.getValue().getClass());
}
}
return mapStrStr;
@@ -682,7 +686,8 @@ private static FilterSettings readFilterSettings(XContentParser parser) throws I
} else if (currentFieldName.equals("max_word_length")) {
settings.maxWordLength = parser.intValue();
} else {
- throw new ElasticsearchParseException("failed to parse term vectors request. the field [{}] is not valid for filter parameter for term vector request", currentFieldName);
+ throw new ElasticsearchParseException("failed to parse term vectors request. " +
+ "the field [{}] is not valid for filter parameter for term vector request", currentFieldName);
}
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java
index 01a9812516bf7..9159a07e83c03 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java
@@ -197,7 +197,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
return builder;
}
- private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fields theFields, Iterator<String> fieldIter) throws IOException {
+ private void buildField(XContentBuilder builder, final CharsRefBuilder spare,
+ Fields theFields, Iterator<String> fieldIter) throws IOException {
String fieldName = fieldIter.next();
builder.startObject(fieldName);
Terms curTerms = theFields.terms(fieldName);
@@ -213,7 +214,8 @@ private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fi
builder.endObject();
}
- private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms, TermsEnum termIter, BoostAttribute boostAtt) throws IOException {
+ private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms,
+ TermsEnum termIter, BoostAttribute boostAtt) throws IOException {
// start term, optimized writing
BytesRef term = termIter.next();
spare.copyUTF8Bytes(term);
@@ -235,7 +237,8 @@ private void buildTermStatistics(XContentBuilder builder, TermsEnum termIter) th
// boolean that says if these values actually were requested.
// However, we can assume that they were not if the statistic values are
// <= 0.
- assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1)));
+ assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) ||
+ ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1)));
int docFreq = termIter.docFreq();
if (docFreq > 0) {
builder.field(FieldStrings.DOC_FREQ, docFreq);
@@ -349,12 +352,13 @@ public void setExists(boolean exists) {
this.exists = exists;
}
- public void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, Fields topLevelFields) throws IOException {
+ public void setFields(Fields termVectorsByField, Set<String> selectedFields,
+ EnumSet<Flag> flags, Fields topLevelFields) throws IOException {
setFields(termVectorsByField, selectedFields, flags, topLevelFields, null, null);
}
- public void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, Fields topLevelFields, @Nullable AggregatedDfs dfs,
- TermVectorsFilter termVectorsFilter) throws IOException {
+ public void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags,
+ Fields topLevelFields, @Nullable AggregatedDfs dfs, TermVectorsFilter termVectorsFilter) throws IOException {
TermVectorsWriter tvw = new TermVectorsWriter(this);
if (termVectorsByField != null) {
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java
index 9aca80b533f66..d38a980c58979 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java
@@ -141,10 +141,12 @@ void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag>
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java
Map<ShardId, MultiTermVectorsShardRequest> shardRequests = new HashMap<>();
for (int i = 0; i < request.requests.size(); i++) {
TermVectorsRequest termVectorsRequest = request.requests.get(i);
- termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.routing(), termVectorsRequest.index()));
+ termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.routing(),
+ termVectorsRequest.index()));
if (!clusterState.metaData().hasConcreteIndex(termVectorsRequest.index())) {
responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(termVectorsRequest.index(),
termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index()))));
continue;
}
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
- if (termVectorsRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
- responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
- new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]"))));
+ if (termVectorsRequest.routing() == null &&
+ clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
+ responses.set(i, new MultiTermVectorsItemResponse(null,
+ new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
+ new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" +
+ termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]"))));
continue;
}
ShardId shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex,
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
index 6796d23eaadcf..e8d6c1bcb4ff6 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
@@ -36,7 +36,8 @@
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-public class TransportShardMultiTermsVectorAction extends TransportSingleShardAction<MultiTermVectorsShardRequest, MultiTermVectorsShardResponse> {
+public class TransportShardMultiTermsVectorAction extends
+ TransportSingleShardAction<MultiTermVectorsShardRequest, MultiTermVectorsShardResponse> {
private final IndicesService indicesService;
@@ -86,7 +87,8 @@ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequ
if (TransportActions.isShardNotAvailableException(e)) {
throw e;
} else {
- logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), e);
+ logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]",
+ shardId, termVectorsRequest.type(), termVectorsRequest.id()), e);
response.add(request.locations.get(i),
new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), e));
}
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java
index 49a78275669fc..dcd0fa1b911b9 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java
@@ -85,7 +85,8 @@ protected void resolveRequest(ClusterState state, InternalRequest request) {
}
@Override
- protected void asyncShardOperation(TermVectorsRequest request, ShardId shardId, ActionListener<TermVectorsResponse> listener) throws IOException {
+ protected void asyncShardOperation(TermVectorsRequest request, ShardId shardId,
+ ActionListener<TermVectorsResponse> listener) throws IOException {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
if (request.realtime()) { // it's a realtime request which is not subject to refresh cycles
diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
index 9cf85c1c7732f..8561d106bdf78 100644
--- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
@@ -75,7 +75,8 @@ public TransportUpdateAction(ThreadPool threadPool, ClusterService clusterServic
UpdateHelper updateHelper, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService,
AutoCreateIndex autoCreateIndex, NodeClient client) {
- super(UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpdateRequest::new);
+ super(UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters,
+ indexNameExpressionResolver, UpdateRequest::new);
this.updateHelper = updateHelper;
this.indicesService = indicesService;
this.autoCreateIndex = autoCreateIndex;
@@ -114,7 +115,8 @@ public static void resolveAndValidateRouting(MetaData metaData, String concreteI
protected void doExecute(Task task, final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
// if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API
if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) {
- client.admin().indices().create(new CreateIndexRequest().index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
+ client.admin().indices().create(new CreateIndexRequest().index(request.index()).cause("auto(update api)")
+ .masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
@Override
public void onResponse(CreateIndexResponse result) {
innerExecute(task, request, listener);
@@ -177,11 +179,14 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<
final BytesReference upsertSourceBytes = upsertRequest.source();
client.bulk(toSingleItemBulkRequest(upsertRequest), wrapBulkResponse(
ActionListener.wrap(response -> {
- UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult());
+ UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(),
+ response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(),
+ response.getVersion(), response.getResult());
if (request.fetchSource() != null && request.fetchSource().fetchSource()) {
Tuple<XContentType, Map<String, Object>> sourceAndContent =
XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType());
- update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
+ update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(),
+ sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
} else {
update.setGetResult(null);
}
@@ -197,8 +202,11 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<
final BytesReference indexSourceBytes = indexRequest.source();
client.bulk(toSingleItemBulkRequest(indexRequest), wrapBulkResponse(
ActionListener.wrap(response -> {
- UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult());
- update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
+ UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(),
+ response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(),
+ response.getVersion(), response.getResult());
+ update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(),
+ result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
update.setForcedRefresh(response.forcedRefresh());
listener.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount)))
@@ -208,8 +216,11 @@ protected void shardOperation(final UpdateRequest request, final ActionListener<
DeleteRequest deleteRequest = result.action();
client.bulk(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse(
ActionListener.wrap(response -> {
- UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult());
- update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));
+ UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(),
+ response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(),
+ response.getVersion(), response.getResult());
+ update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(),
+ result.updatedSourceAsMap(), result.updateSourceContentType(), null));
update.setForcedRefresh(response.forcedRefresh());
listener.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount)))
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index 6fb0580bfe3fe..a4fdce17d09a1 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -149,11 +149,13 @@ public ActionRequestValidationException validate() {
} else {
if (version != Versions.MATCH_ANY && retryOnConflict > 0) {
- validationException = addValidationError("can't provide both retry_on_conflict and a specific version", validationException);
+ validationException = addValidationError("can't provide both retry_on_conflict and a specific version",
+ validationException);
}
if (!versionType.validateVersionForWrites(version)) {
- validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
+ validationException = addValidationError("illegal version value [" + version + "] for version type [" +
+ versionType.name() + "]", validationException);
}
}
@@ -618,8 +620,8 @@ private IndexRequest safeDoc() {
}
/**
- * Sets the index request to be used if the document does not exists. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException}
- * is thrown.
+ * Sets the index request to be used if the document does not exists. Otherwise, a
+ * {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown.
*/
public UpdateRequest upsert(IndexRequest upsertRequest) {
this.upsertRequest = upsertRequest;
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
index 9d1fd4a677f05..181dba6a10734 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -243,8 +243,8 @@ public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source)
}
/**
- * Sets the index request to be used if the document does not exists. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException}
- * is thrown.
+ * Sets the index request to be used if the document does not exists. Otherwise, a
+ * {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown.
*/
public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) {
request.upsert(indexRequest);
diff --git a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java
index b45449425cb24..ad2447cb7b3d0 100644
--- a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java
+++ b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java
@@ -87,7 +87,8 @@ public void onFailure(Exception e) {
if (response instanceof SearchResponse) {
SearchResponse searchResponse = (SearchResponse) response;
for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
- assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected"));
+ assertTrue("got unexpected reason..." + failure.reason(),
+ failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected"));
}
} else {
Exception t = (Exception) response;
@@ -95,7 +96,8 @@ public void onFailure(Exception e) {
if (unwrap instanceof SearchPhaseExecutionException) {
SearchPhaseExecutionException e = (SearchPhaseExecutionException) unwrap;
for (ShardSearchFailure failure : e.shardFailures()) {
- assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected"));
+ assertTrue("got unexpected reason..." + failure.reason(),
+ failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected"));
}
} else if ((unwrap instanceof EsRejectedExecutionException) == false) {
throw new AssertionError("unexpected failure", (Throwable) response);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java
index 9f5d40c6709f0..7c00705b2a28e 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java
@@ -120,7 +120,8 @@ public void onFailure(Exception e) {
assertHitCount(
client().prepareSearch()
.setQuery(matchAllQuery())
- .setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2"))))
+ .setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery()
+ .must(termQuery("field1", "value1")).must(termQuery("field1", "value2"))))
.get(),
3L);
}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
index 06a8f0fb522e8..012f801698f96 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
@@ -70,8 +70,8 @@ public void testClusterHealth() throws IOException {
int inFlight = randomIntBetween(0, 200);
int delayedUnassigned = randomIntBetween(0, 200);
TimeValue pendingTaskInQueueTime = TimeValue.timeValueMillis(randomIntBetween(1000, 100000));
- ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", new String[] {MetaData.ALL}, clusterState, pendingTasks,
- inFlight, delayedUnassigned, pendingTaskInQueueTime);
+ ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", new String[] {MetaData.ALL},
+ clusterState, pendingTasks, inFlight, delayedUnassigned, pendingTaskInQueueTime);
clusterHealth = maybeSerialize(clusterHealth);
assertClusterHealth(clusterHealth);
assertThat(clusterHealth.getNumberOfPendingTasks(), Matchers.equalTo(pendingTasks));
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
index 8db74ba0a192a..1d22e4b6c6e6a 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
@@ -66,8 +66,8 @@ public void testVerifyRepositoryWithBlocks() {
// This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
try {
setClusterReadOnly(true);
- VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute()
- .actionGet();
+ VerifyRepositoryResponse response = client().admin().cluster()
+ .prepareVerifyRepository("test-repo-blocks").execute().actionGet();
assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes()));
} finally {
setClusterReadOnly(false);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
index 4d4486434949c..f51c2b7b172c4 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
@@ -88,10 +88,8 @@ public void testCreateSnapshotWithBlocks() {
logger.info("--> creating a snapshot is allowed when the cluster is read only");
try {
setClusterReadOnly(true);
- assertThat(
- client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setWaitForCompletion(true).get().status(),
- equalTo(RestStatus.OK)
- );
+ assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1")
+ .setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK));
} finally {
setClusterReadOnly(false);
}
@@ -107,11 +105,8 @@ public void testCreateSnapshotWithIndexBlocks() {
logger.info("--> creating a snapshot is not blocked when an index is read only");
try {
enableIndexBlock(INDEX_NAME, SETTING_READ_ONLY);
- assertThat(
- client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1")
- .setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(),
- equalTo(RestStatus.OK)
- );
+ assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1")
+ .setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK));
} finally {
disableIndexBlock(INDEX_NAME, SETTING_READ_ONLY);
}
@@ -119,16 +114,11 @@ public void testCreateSnapshotWithIndexBlocks() {
logger.info("--> creating a snapshot is blocked when an index is blocked for reads");
try {
enableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ);
- assertBlocked(
- client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(COMMON_INDEX_NAME_MASK),
- IndexMetaData.INDEX_READ_BLOCK
- );
+ assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2")
+ .setIndices(COMMON_INDEX_NAME_MASK), IndexMetaData.INDEX_READ_BLOCK);
logger.info("--> creating a snapshot is not blocked when an read-blocked index is not part of the snapshot");
- assertThat(
- client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").
- setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(),
- equalTo(RestStatus.OK)
- );
+ assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2")
+ .setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK));
} finally {
disableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ);
}
@@ -151,10 +141,8 @@ public void testRestoreSnapshotWithBlocks() {
logger.info("--> restoring a snapshot is blocked when the cluster is read only");
try {
setClusterReadOnly(true);
- assertBlocked(
- client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME),
- MetaData.CLUSTER_READ_ONLY_BLOCK
- );
+ assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME),
+ MetaData.CLUSTER_READ_ONLY_BLOCK);
} finally {
setClusterReadOnly(false);
}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java
index f7114a52f3db7..e2a07063d48d5 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java
@@ -40,8 +40,8 @@ public void testSerialization() throws Exception {
ClusterStateRequest clusterStateRequest = new ClusterStateRequest().routingTable(randomBoolean()).metaData(randomBoolean())
.nodes(randomBoolean()).blocks(randomBoolean()).indices("testindex", "testindex2").indicesOptions(indicesOptions);
- Version testVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(),
- Version.CURRENT);
+ Version testVersion = VersionUtils.randomVersionBetween(random(),
+ Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);
BytesStreamOutput output = new BytesStreamOutput();
output.setVersion(testVersion);
clusterStateRequest.writeTo(output);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java
index 4d6dc3bf43ba5..bf77cdeebd067 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java
@@ -54,8 +54,8 @@ private void assertCounts(ClusterStatsNodes.Counts counts, int total, Map<String, Integer>
diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
--- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
+++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
@@ ... @@ private void checkAnalyzedFields(Fields fieldsObject, Set<String> fieldNames, Ma
// check overridden by keyword analyzer ...
if (perFieldAnalyzer.containsKey(fieldName)) {
TermsEnum iterator = terms.iterator();
- assertThat("Analyzer for " + fieldName + " should have been overridden!", iterator.next().utf8ToString(), equalTo("some text here"));
+ assertThat("Analyzer for " + fieldName + " should have been overridden!",
+ iterator.next().utf8ToString(), equalTo("some text here"));
assertThat(iterator.next(), nullValue());
}
validFields.add(fieldName);
diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java
index 2f75f6df1a88e..08751ffe058ec 100644
--- a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java
+++ b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java
@@ -118,7 +118,8 @@ public void testMultiTermVectorsWithVersion() throws Exception {
//Version from Lucene index
refresh();
response = client().prepareMultiTermVectors()
- .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY).realtime(false))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field")
+ .version(Versions.MATCH_ANY).realtime(false))
.add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1).realtime(false))
.add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2).realtime(false))
.get();
diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
index 216c1802956e8..9a8bb38d8cd2d 100644
--- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
+++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
@@ -269,13 +269,14 @@ public void testFieldTypeToTermVectorString() throws Exception {
String ftOpts = FieldMapper.termVectorOptionsToString(ft);
assertThat("with_positions_payloads", equalTo(ftOpts));
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(null);
- boolean exceptiontrown = false;
+ boolean exceptionThrown = false;
try {
TypeParsers.parseTermVector("", ftOpts, builder);
} catch (MapperParsingException e) {
- exceptiontrown = true;
+ exceptionThrown = true;
}
- assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", exceptiontrown, equalTo(false));
+ assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.",
+ exceptionThrown, equalTo(false));
}
public void testTermVectorStringGenerationWithoutPositions() throws Exception {