
Commit 7496032

Merge branch '6.x' into ccr-6.x
* 6.x:
  [DOCS] Omit shard failures assertion for incompatible responses (#31430)
  [DOCS] Move licensing APIs to docs (#31445)
  backport of: add is-write-index flag to aliases (#30942) (#31412)
  backport of: Add rollover-creation-date setting to rolled over index (#31144) (#31413)
  [Docs] Extend Homebrew installation instructions (#28902)
  [Docs] Mention ip_range datatypes on ip type page (#31416)
  Multiplexing token filter (#31208)
  Fix use of time zone in date_histogram rewrite (#31407)
  Revert "Mute DefaultShardsIT#testDefaultShards test"
  [DOCS] Fixes code snippet testing for machine learning (#31189)
  Security: fix joining cluster with production license (#31341)
  [DOCS] Updated version in Info API example
  [DOCS] Moves the info API to docs (#31121)
  Revert "Increasing skip version for failing test on 6.x"
  Preserve response headers on cluster update task (#31421)
  [DOCS] Add code snippet testing for more ML APIs (#31404)
  Docs: Advice for reindexing many indices (#31279)
2 parents: 35243f2 + 60b4be6; commit: 7496032


90 files changed: 1849 additions, 247 deletions


buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy

Lines changed: 12 additions & 7 deletions
@@ -27,7 +27,6 @@ import org.gradle.api.tasks.OutputDirectory
 
 import java.nio.file.Files
 import java.nio.file.Path
-import java.util.regex.Matcher
 
 /**
  * Generates REST tests for each snippet marked // TEST.
@@ -100,6 +99,14 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
         return snippet.language == 'js' || snippet.curl
     }
 
+    /**
+     * Certain requests should not have the shard failure check because the
+     * format of the response is incompatible i.e. it is not a JSON object.
+     */
+    static shouldAddShardFailureCheck(String path) {
+        return path.startsWith('_cat') == false && path.startsWith('_xpack/ml/datafeeds/') == false
+    }
+
     /**
      * Converts Kibana's block quoted strings into standard JSON. These
     * {@code """} delimited strings can be embedded in CONSOLE and can
@@ -308,13 +315,11 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
             * no shard succeeds. But we need to fail the tests on all of these
             * because they mean invalid syntax or broken queries or something
             * else that we don't want to teach people to do. The REST test
-            * framework doesn't allow us to has assertions in the setup
-            * section so we have to skip it there. We also have to skip _cat
-            * actions because they don't return json so we can't is_false
-            * them. That is ok because they don't have this
-            * partial-success-is-success thing.
+            * framework doesn't allow us to have assertions in the setup
+            * section so we have to skip it there. We also omit the assertion
+            * from APIs that don't return a JSON object
             */
-            if (false == inSetup && false == path.startsWith('_cat')) {
+            if (false == inSetup && shouldAddShardFailureCheck(path)) {
                current.println("  - is_false: _shards.failures")
            }
        }
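The net effect of these two hunks: the `- is_false: _shards.failures` assertion is now only emitted for request paths whose responses are JSON objects. A minimal stand-alone Groovy sketch of that gating; the example request paths are illustrative and not taken from the commit's fixtures:

    // Mirrors the predicate added above: _cat tables and ML datafeed previews
    // do not return a JSON object, so the shard-failure assertion is skipped.
    boolean shouldAddShardFailureCheck(String path) {
        path.startsWith('_cat') == false && path.startsWith('_xpack/ml/datafeeds/') == false
    }

    // Illustrative paths only.
    assert shouldAddShardFailureCheck('twitter/_search')
    assert !shouldAddShardFailureCheck('_cat/indices')
    assert !shouldAddShardFailureCheck('_xpack/ml/datafeeds/my-feed/_preview')

    // When the check applies (and the snippet is not in a setup section), the
    // task appends "  - is_false: _shards.failures" to the generated REST test.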

buildSrc/src/test/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTaskTest.groovy

Lines changed: 7 additions & 3 deletions
@@ -19,9 +19,7 @@
 
 package org.elasticsearch.gradle.doc
 
-import org.elasticsearch.gradle.doc.SnippetsTask.Snippet
-import org.gradle.api.InvalidUserDataException
-
+import static org.elasticsearch.gradle.doc.RestTestsFromSnippetsTask.shouldAddShardFailureCheck
 import static org.elasticsearch.gradle.doc.RestTestsFromSnippetsTask.replaceBlockQuote
 
 class RestTestFromSnippetsTaskTest extends GroovyTestCase {
@@ -47,4 +45,10 @@ class RestTestFromSnippetsTaskTest extends GroovyTestCase {
         assertEquals("\"foo\": \"bort\\n baz\"",
                 replaceBlockQuote("\"foo\": \"\"\"bort\n baz\"\"\""));
     }
+
+    void testIsDocWriteRequest() {
+        assertTrue(shouldAddShardFailureCheck("doc-index/_search"));
+        assertFalse(shouldAddShardFailureCheck("_cat"))
+        assertFalse(shouldAddShardFailureCheck("_xpack/ml/datafeeds/datafeed-id/_preview"));
+    }
 }

distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java

Lines changed: 0 additions & 1 deletion
@@ -31,7 +31,6 @@
 
 public class DefaultShardsIT extends ESRestTestCase {
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31408")
     public void testDefaultShards() throws IOException {
         final Response response = client().performRequest(new Request("PUT", "/index"));
         final String warning = response.getHeader("Warning");

docs/reference/analysis/tokenfilters.asciidoc

Lines changed: 2 additions & 0 deletions
@@ -35,6 +35,8 @@ include::tokenfilters/word-delimiter-tokenfilter.asciidoc[]
 
 include::tokenfilters/word-delimiter-graph-tokenfilter.asciidoc[]
 
+include::tokenfilters/multiplexer-tokenfilter.asciidoc[]
+
 include::tokenfilters/stemmer-tokenfilter.asciidoc[]
 
 include::tokenfilters/stemmer-override-tokenfilter.asciidoc[]

docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc

Lines changed: 116 additions & 0 deletions
@@ -0,0 +1,116 @@
+[[analysis-multiplexer-tokenfilter]]
+=== Multiplexer Token Filter
+
+A token filter of type `multiplexer` will emit multiple tokens at the same position,
+each version of the token having been run through a different filter. Identical
+output tokens at the same position will be removed.
+
+WARNING: If the incoming token stream has duplicate tokens, then these will also be
+removed by the multiplexer
+
+[float]
+=== Options
+[horizontal]
+filters:: a list of token filters to apply to incoming tokens. These can be any
+token filters defined elsewhere in the index mappings. Filters can be chained
+using a comma-delimited string, so for example `"lowercase, porter_stem"` would
+apply the `lowercase` filter and then the `porter_stem` filter to a single token.
+
+WARNING: Shingle or multi-word synonym token filters will not function normally
+when they are declared in the filters array because they read ahead internally
+which is unsupported by the multiplexer
+
+preserve_original:: if `true` (the default) then emit the original token in
+addition to the filtered tokens
+
+
+[float]
+=== Settings example
+
+You can set it up like:
+
+[source,js]
+--------------------------------------------------
+PUT /multiplexer_example
+{
+    "settings" : {
+        "analysis" : {
+            "analyzer" : {
+                "my_analyzer" : {
+                    "tokenizer" : "standard",
+                    "filter" : [ "my_multiplexer" ]
+                }
+            },
+            "filter" : {
+                "my_multiplexer" : {
+                    "type" : "multiplexer",
+                    "filters" : [ "lowercase", "lowercase, porter_stem" ]
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
+And test it like:
+
+[source,js]
+--------------------------------------------------
+POST /multiplexer_example/_analyze
+{
+  "analyzer" : "my_analyzer",
+  "text" : "Going HOME"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+And it'd respond:
+
+[source,js]
+--------------------------------------------------
+{
+  "tokens": [
+    {
+      "token": "Going",
+      "start_offset": 0,
+      "end_offset": 5,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "going",
+      "start_offset": 0,
+      "end_offset": 5,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "go",
+      "start_offset": 0,
+      "end_offset": 5,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "HOME",
+      "start_offset": 6,
+      "end_offset": 10,
+      "type": "<ALPHANUM>",
+      "position": 1
+    },
+    {
+      "token": "home", <1>
+      "start_offset": 6,
+      "end_offset": 10,
+      "type": "<ALPHANUM>",
+      "position": 1
+    }
+  ]
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+<1> The stemmer has also emitted a token `home` at position 1, but because it is a
+duplicate of this token it has been removed from the token stream

docs/reference/docs/reindex.asciidoc

Lines changed: 29 additions & 2 deletions
@@ -1028,11 +1028,38 @@ number of slices.
 Whether query or indexing performance dominates the runtime depends on the
 documents being reindexed and cluster resources.
 
+[float]
+=== Reindexing many indices
+If you have many indices to reindex it is generally better to reindex them
+one at a time rather than using a glob pattern to pick up many indices. That
+way you can resume the process if there are any errors by removing the
+partially completed index and starting over at that index. It also makes
+parallelizing the process fairly simple: split the list of indices to reindex
+and run each list in parallel.
+
+One off bash scripts seem to work nicely for this:
+
+[source,bash]
+----------------------------------------------------------------
+for index in i1 i2 i3 i4 i5; do
+  curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
+    "source": {
+      "index": "'$index'"
+    },
+    "dest": {
+      "index": "'$index'-reindexed"
+    }
+  }'
+done
+----------------------------------------------------------------
+// NOTCONSOLE
+
 [float]
 === Reindex daily indices
 
-You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
-to reindex daily indices to apply a new template to the existing documents.
+Notwithstanding the above advice, you can use `_reindex` in combination with
+<<modules-scripting-painless, Painless>> to reindex daily indices to apply
+a new template to the existing documents.
 
 Assuming you have indices consisting of documents as follows:
 
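The added section's advice about parallelizing (split the list of indices and run each sublist concurrently) can also be scripted outside of bash. A hedged Groovy sketch, assuming a cluster on localhost:9200 and the same illustrative index names i1..i5 as the docs example:

    import java.util.concurrent.Executors

    // Illustrative index names, matching the docs example; adjust as needed.
    def indices = ['i1', 'i2', 'i3', 'i4', 'i5']
    def pool = Executors.newFixedThreadPool(2)   // roughly "two lists running in parallel"

    indices.each { index ->
        pool.submit {
            def body = """{"source": {"index": "$index"}, "dest": {"index": "$index-reindexed"}}"""
            def conn = new URL('http://localhost:9200/_reindex?pretty').openConnection()
            conn.requestMethod = 'POST'
            conn.doOutput = true
            conn.setRequestProperty('Content-Type', 'application/json')
            conn.outputStream.withWriter { it << body }
            // Reading the response code sends the request and blocks until reindex finishes.
            println "$index -> HTTP ${conn.responseCode}"
        }
    }
    pool.shutdown()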

docs/reference/getting-started.asciidoc

Lines changed: 4 additions & 0 deletions
@@ -158,6 +158,9 @@ On macOS, Elasticsearch can also be installed via https://brew.sh[Homebrew]:
 brew install elasticsearch
 --------------------------------------------------
 
+If installation succeeds, Homebrew will finish by saying that you can start Elasticsearch by entering
+`elasticsearch`. Do that now. The expected response is described below, under <<successfully-running-node>>.
+
 [float]
 === Installation example with MSI Windows Installer
 

@@ -216,6 +219,7 @@ And now we are ready to start our node and single cluster:
 --------------------------------------------------
 
 [float]
+[[successfully-running-node]]
 === Successfully running node
 
 If everything goes well with installation, you should see a bunch of messages that look like below:

docs/reference/indices/aliases.asciidoc

Lines changed: 88 additions & 0 deletions
@@ -244,6 +244,94 @@ GET /alias2/_search?q=user:kimchy&routing=2,3
 // CONSOLE
 // TEST[continued]
 
+[float]
+[[aliases-write-index]]
+==== Write Index
+
+It is possible to associate the index pointed to by an alias as the write index.
+When specified, all index and update requests against an alias that point to multiple
+indices will attempt to resolve to the one index that is the write index.
+Only one index per alias can be assigned to be the write index at a time. If no write index is specified
+and there are multiple indices referenced by an alias, then writes will not be allowed.
+
+It is possible to specify an index associated with an alias as a write index using both the aliases API
+and index creation API.
+
+[source,js]
+--------------------------------------------------
+POST /_aliases
+{
+    "actions" : [
+        {
+            "add" : {
+                 "index" : "test",
+                 "alias" : "alias1",
+                 "is_write_index" : true
+            }
+        }
+    ]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT test\n/]
+
+In this example, we associate the alias `alias1` to both `test` and `test2`, where
+`test` will be the index chosen for writing to.
+
+[source,js]
+--------------------------------------------------
+PUT /alias1/_doc/1
+{
+    "foo": "bar"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The new document that was indexed to `/alias1/_doc/1` will be indexed as if it were
+`/test/_doc/1`.
+
+[source,js]
+--------------------------------------------------
+GET /test/_doc/1
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+To swap which index is the write index for an alias, the Aliases API can be leveraged to
+do an atomic swap. The swap is not dependent on the ordering of the actions.
+
+[source,js]
+--------------------------------------------------
+POST /_aliases
+{
+    "actions" : [
+        {
+            "add" : {
+                 "index" : "test",
+                 "alias" : "alias1",
+                 "is_write_index" : true
+            }
+        }, {
+            "add" : {
+                 "index" : "test2",
+                 "alias" : "alias1",
+                 "is_write_index" : false
+            }
+        }
+    ]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT test\nPUT test2\n/]
+
+[IMPORTANT]
+=====================================
+Aliases that do not explicitly set `is_write_index: true` for an index, and
+only reference one index, will have that referenced index behave as if it is the write index
+until an additional index is referenced. At that point, there will be no write index and
+writes will be rejected.
+=====================================
 
 [float]
 [[alias-adding]]
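The IMPORTANT block above has a concrete consequence that the console snippets don't show: an alias that never declares a write index keeps accepting writes while it points at a single index, and starts rejecting them once a second index is referenced. A sketch of that sequence in Groovy against the low-level REST client (the same `Request`/`RestClient` API used by DefaultShardsIT.java earlier in this commit); the host, index, and alias names are assumptions for illustration, not part of the commit:

    import org.apache.http.HttpHost
    import org.elasticsearch.client.Request
    import org.elasticsearch.client.ResponseException
    import org.elasticsearch.client.RestClient

    // Assumes a local node on 9200; "logs-1", "logs-2" and "logs-write" are illustrative names.
    def client = RestClient.builder(new HttpHost('localhost', 9200, 'http')).build()
    try {
        client.performRequest(new Request('PUT', '/logs-1'))
        client.performRequest(new Request('PUT', '/logs-1/_alias/logs-write'))

        // One referenced index, no is_write_index flag: the alias still accepts writes.
        def first = new Request('PUT', '/logs-write/_doc/1')
        first.setJsonEntity('{"message": "first"}')
        client.performRequest(first)

        // Point the alias at a second index, still without declaring a write index ...
        client.performRequest(new Request('PUT', '/logs-2'))
        client.performRequest(new Request('PUT', '/logs-2/_alias/logs-write'))

        // ... and further writes through the alias are rejected.
        def second = new Request('PUT', '/logs-write/_doc/2')
        second.setJsonEntity('{"message": "second"}')
        try {
            client.performRequest(second)
        } catch (ResponseException e) {
            println "write rejected as expected: ${e.response.statusLine}"
        }
    } finally {
        client.close()
    }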

x-pack/docs/en/rest-api/license/delete-license.asciidoc renamed to docs/reference/licensing/delete-license.asciidoc

Lines changed: 2 additions & 0 deletions
@@ -1,4 +1,5 @@
 [role="xpack"]
+[testenv="basic"]
 [[delete-license]]
 === Delete License API
 

@@ -41,3 +42,4 @@ When the license is successfully deleted, the API returns the following response
   "acknowledged": true
 }
 ------------------------------------------------------------
+// NOTCONSOLE

x-pack/docs/en/rest-api/license/get-basic-status.asciidoc renamed to docs/reference/licensing/get-basic-status.asciidoc

Lines changed: 1 addition & 0 deletions
@@ -1,4 +1,5 @@
 [role="xpack"]
+[testenv="basic"]
 [[get-basic-status]]
 === Get Basic Status API
 