diff --git a/docs/versioned-plugins/filters/elasticsearch-index.asciidoc b/docs/versioned-plugins/filters/elasticsearch-index.asciidoc index a9c0cfd84..d05920129 100644 --- a/docs/versioned-plugins/filters/elasticsearch-index.asciidoc +++ b/docs/versioned-plugins/filters/elasticsearch-index.asciidoc @@ -5,6 +5,7 @@ include::{include_path}/version-list-intro.asciidoc[] |======================================================================= | Version | Release Date +| <> | 2024-12-09 | <> | 2023-09-29 | <> | 2023-09-29 | <> | 2023-09-08 @@ -38,6 +39,7 @@ include::{include_path}/version-list-intro.asciidoc[] | <> | 2017-05-03 |======================================================================= +include::elasticsearch-v3.16.2.asciidoc[] include::elasticsearch-v3.16.1.asciidoc[] include::elasticsearch-v3.16.0.asciidoc[] include::elasticsearch-v3.15.3.asciidoc[] diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.16.2.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.16.2.asciidoc new file mode 100644 index 000000000..f13f36957 --- /dev/null +++ b/docs/versioned-plugins/filters/elasticsearch-v3.16.2.asciidoc @@ -0,0 +1,575 @@ +:plugin: elasticsearch +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.16.2 +:release_date: 2024-12-09 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.16.2/CHANGELOG.md +:include_path: ../include/6.x +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Search Elasticsearch for a previous log event and copy some fields from it +into the current event. Below are two complete examples of how this filter might +be used. 
+ +The first example uses the legacy 'query' parameter where the user is limited to +an Elasticsearch query_string. +Whenever logstash receives an "end" event, it uses this elasticsearch +filter to find the matching "start" event based on some operation identifier. +Then it copies the `@timestamp` field from the "start" event into a new field on +the "end" event. Finally, using a combination of the "date" filter and the +"ruby" filter, we calculate the time duration in hours between the two events. + +[source,ruby] +-------------------------------------------------- +if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query => "type:start AND operation:%{[opid]}" + fields => { "@timestamp" => "started" } + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event.set('duration_hrs', (event.get('@timestamp') - event.get('started')) / 3600)" + } +} +-------------------------------------------------- + +The example below reproduces the above example but utilises the query_template. +This query_template represents a full Elasticsearch query DSL and supports the +standard Logstash field substitution syntax. The example below issues +the same query as the first example but uses the template shown. 
+ +[source,ruby] +-------------------------------------------------- +if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query_template => "template.json" + fields => { "@timestamp" => "started" } + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event.set('duration_hrs', (event.get('@timestamp') - event.get('started')) / 3600)" + } +} +-------------------------------------------------- + +template.json: + +[source,json] +-------------------------------------------------- +{ + "size": 1, + "sort" : [ { "@timestamp" : "desc" } ], + "query": { + "query_string": { + "query": "type:start AND operation:%{[opid]}" + } + }, + "_source": ["@timestamp"] +} +-------------------------------------------------- + +As illustrated above, through the use of 'opid', fields from the Logstash +events can be referenced within the template. +The template will be populated per event prior to being used to query Elasticsearch. + +Notice also that when you use `query_template`, the Logstash attributes `result_size` +and `sort` will be ignored. They should be specified directly in the JSON +template, as shown in the example above. + +[id="{version}-plugins-{type}s-{plugin}-auth"] +==== Authentication + +Authentication to a secure Elasticsearch cluster is possible using _one_ of the following options: + +* <<{version}-plugins-{type}s-{plugin}-user>> AND <<{version}-plugins-{type}s-{plugin}-password>> +* <<{version}-plugins-{type}s-{plugin}-cloud_auth>> +* <<{version}-plugins-{type}s-{plugin}-api_key>> +* <<{version}-plugins-{type}s-{plugin}-keystore>> and/or <<{version}-plugins-{type}s-{plugin}-keystore_password>> + +[id="{version}-plugins-{type}s-{plugin}-autz"] +==== Authorization + +Authorization to a secure Elasticsearch cluster requires `read` permission at index level and `monitoring` permissions at cluster level. 
+The `monitoring` permission at cluster level is necessary to perform periodic connectivity checks. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> and the <<{version}-plugins-{type}s-{plugin}-deprecated-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-aggregation_fields>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-api_key>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ca_trusted_fingerprint>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cloud_auth>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-cloud_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| 
<<{version}-plugins-{type}s-{plugin}-query>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-query_template>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-result_size>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_failure>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_status>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-sort>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|__Deprecated__ +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |list of {logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cipher_suites>> |list of {logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enabled>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_path>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_supported_protocols>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_path>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verification_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["full", "none"]`|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-aggregation_fields"] +===== `aggregation_fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +Hash of aggregation names to copy from elasticsearch response into Logstash event fields + +Example: +[source,ruby] + filter { + elasticsearch { + aggregation_fields => { + "my_agg_name" => "my_ls_field" + } + } + } + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Authenticate using Elasticsearch API key. Note that this option also requires +enabling the <<{version}-plugins-{type}s-{plugin}-ssl_enabled>> option. + +Format is `id:api_key` where `id` and `api_key` are as returned by the +Elasticsearch {ref}/security-api-create-api-key.html[Create API key API]. 
+ +[id="{version}-plugins-{type}s-{plugin}-ca_trusted_fingerprint"] +===== `ca_trusted_fingerprint` + +* Value type is {logstash-ref}/configuration-file-structure.html#string[string], and must contain exactly 64 hexadecimal characters. +* There is no default value for this setting. +* Use of this option _requires_ Logstash 8.3+ + +The SHA-256 fingerprint of an SSL Certificate Authority to trust, such as the autogenerated self-signed CA for an Elasticsearch cluster. + +[id="{version}-plugins-{type}s-{plugin}-cloud_auth"] +===== `cloud_auth` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Cloud authentication string ("<username>:<password>" format) is an alternative for the `user`/`password` pair. + +For more info, check out the +{logstash-ref}/connecting-to-cloud.html[Logstash-to-Cloud documentation]. + +[id="{version}-plugins-{type}s-{plugin}-cloud_id"] +===== `cloud_id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Cloud ID, from the Elastic Cloud web console. If set `hosts` should not be used. + +For more info, check out the +{logstash-ref}/connecting-to-cloud.html[Logstash-to-Cloud documentation]. 
+ +[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"] +===== `docinfo_fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +Hash of docinfo fields to copy from old event (found via elasticsearch) into new event + +Example: +[source,ruby] + filter { + elasticsearch { + docinfo_fields => { + "_id" => "document_id" + "_index" => "document_index" + } + } + } + +[id="{version}-plugins-{type}s-{plugin}-enable_sort"] +===== `enable_sort` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Whether results should be sorted or not + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `{}` + +An array of fields to copy from the old event (found via elasticsearch) into the +new event, currently being processed. + +In the following example, the values of `@timestamp` and `event_id` on the event +found via elasticsearch are copied to the current event's +`started` and `start_id` fields, respectively: + +[source,ruby] +-------------------------------------------------- +fields => { + "@timestamp" => "started" + "event_id" => "start_id" +} +-------------------------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `["localhost:9200"]` + +List of elasticsearch hosts to use for querying. + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices. +Field substitution (e.g. 
`index-name-%{date_field}`) is available + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Basic Auth - password + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + +* Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] +* There is no default value for this setting. + +Set the address of a forward HTTP proxy. +An empty string is treated as if proxy was not set, and is useful when using +environment variables e.g. `proxy => '${LS_PROXY:}'`. + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Elasticsearch query string. More information is available in the +{ref}/query-dsl-query-string-query.html#query-string-syntax[Elasticsearch query +string documentation]. +Use either `query` or `query_template`. + + +[id="{version}-plugins-{type}s-{plugin}-query_template"] +===== `query_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +File path to elasticsearch query in DSL format. More information is available in +the {ref}/query-dsl.html[Elasticsearch query documentation]. +Use either `query` or `query_template`. + +[id="{version}-plugins-{type}s-{plugin}-result_size"] +===== `result_size` + +* Value type is {logstash-ref}/configuration-file-structure.html#number[number] +* Default value is `1` + +How many results to return + +[id="{version}-plugins-{type}s-{plugin}-retry_on_failure"] +===== `retry_on_failure` + +* Value type is {logstash-ref}/configuration-file-structure.html#number[number] +* Default value is `0` (retries disabled) + +How many times to retry an individual failed request. 
+ +When enabled, retry requests that result in connection errors or an HTTP status code included in <<{version}-plugins-{type}s-{plugin}-retry_on_status>> + +[id="{version}-plugins-{type}s-{plugin}-retry_on_status"] +===== `retry_on_status` + +* Value type is {logstash-ref}/configuration-file-structure.html#array[array] +* Default value is an empty list `[]` + +Which HTTP Status codes to consider for retries (in addition to connection errors) when using <<{version}-plugins-{type}s-{plugin}-retry_on_failure>>, + + +[id="{version}-plugins-{type}s-{plugin}-sort"] +===== `sort` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"@timestamp:desc"` + +Comma-delimited list of `<field>:<direction>` pairs that define the sort order + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file. + +NOTE: This setting can be used only if <<{version}-plugins-{type}s-{plugin}-ssl_key>> is set. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] +===== `ssl_certificate_authorities` + + * Value type is a list of {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting + +The .cer or .pem files to validate the server's certificate. + +NOTE: You cannot use this setting and <<{version}-plugins-{type}s-{plugin}-ssl_truststore_path>> at the same time. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cipher_suites"] +===== `ssl_cipher_suites` + * Value type is a list of {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting + +The list of cipher suites to use, listed by priorities. +Supported cipher suites vary depending on the Java and protocol versions. 
+ + +[id="{version}-plugins-{type}s-{plugin}-ssl_enabled"] +===== `ssl_enabled` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. +Leaving this unspecified will use whatever scheme is specified in the URLs listed in <<{version}-plugins-{type}s-{plugin}-hosts>> or extracted from the <<{version}-plugins-{type}s-{plugin}-cloud_id>>. +If no explicit protocol is specified plain HTTP will be used. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +OpenSSL-style RSA private key that corresponds to the <<{version}-plugins-{type}s-{plugin}-ssl_certificate>>. + +NOTE: This setting can be used only if <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> is set. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_path"] +===== `ssl_keystore_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either `.jks` or `.p12` + +NOTE: You cannot use this setting and <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> at the same time. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value can be any of: `jks`, `pkcs12` + * If not provided, the value will be inferred from the keystore filename. + +The format of the keystore file. It must be either `jks` or `pkcs12`. 
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_supported_protocols"] +===== `ssl_supported_protocols` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'` + * Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. + `'TLSv1.1'` is not considered secure and is only provided for legacy applications. + +List of allowed SSL/TLS versions to use when establishing a connection to the Elasticsearch cluster. + +For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the +`LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash. + +NOTE: If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, +the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in +the *$JDK_HOME/conf/security/java.security* configuration file. That is, `TLSv1.1` needs to be removed from the list. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_path"] +===== `ssl_truststore_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The truststore to validate the server's certificate. +It can be either `.jks` or `.p12`. + +NOTE: You cannot use this setting and <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> at the same time. 
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value can be any of: `jks`, `pkcs12` + * If not provided, the value will be inferred from the truststore filename. + +The format of the truststore file. It must be either `jks` or `pkcs12`. + +[id="{version}-plugins-{type}s-{plugin}-ssl_verification_mode"] +===== `ssl_verification_mode` + + * Value can be any of: `full`, `none` + * Default value is `full` + +Defines how to verify the certificates presented by another party in the TLS connection: + +`full` validates that the server certificate has an issue date that’s within +the not_before and not_after dates; chains to a trusted Certificate Authority (CA), and +has a hostname or IP address that matches the names within the certificate. + +`none` performs no certificate validation. + +WARNING: Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `["_elasticsearch_lookup_failure"]` + +Tags the event on failure to look up previous log event information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Basic Auth - username + + +[id="{version}-plugins-{type}s-{plugin}-deprecated-options"] +==== Elasticsearch Filter Deprecated Configuration Options + +This plugin supports the following deprecated configurations. + +WARNING: Deprecated options are subject to removal in future releases. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting|Input type|Replaced by +| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|<<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|<<{version}-plugins-{type}s-{plugin}-ssl_keystore_path>> +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|<<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> +|======================================================================= + +[id="{version}-plugins-{type}s-{plugin}-ca_file"] +===== `ca_file` +deprecated[3.15.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#path[path] +* There is no default value for this setting. + +SSL Certificate Authority file + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` +deprecated[3.15.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_enabled>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] +* Default value is `false` + +SSL + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` +deprecated[3.15.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_keystore_path>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#path[path] +* There is no default value for this setting. + +The keystore used to present a certificate to the server. It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` +deprecated[3.15.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#password[password] +* There is no default value for this setting. 
+ +Set the keystore password + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc index 4e1dbc926..07d7be4fc 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc @@ -5,6 +5,7 @@ include::{include_path}/version-list-intro.asciidoc[] |======================================================================= | Version | Release Date +| <> | 2024-12-09 | <> | 2024-09-13 | <> | 2024-09-13 | <> | 2024-06-25 @@ -128,6 +129,7 @@ include::{include_path}/version-list-intro.asciidoc[] | <> | 2017-05-26 |======================================================================= +include::elasticsearch-v11.22.10.asciidoc[] include::elasticsearch-v11.22.9.asciidoc[] include::elasticsearch-v11.22.8.asciidoc[] include::elasticsearch-v11.22.7.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v11.22.10.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v11.22.10.asciidoc new file mode 100644 index 000000000..8fd6711c1 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v11.22.10.asciidoc @@ -0,0 +1,1422 @@ +:plugin: elasticsearch +:type: output +:no_codec: + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v11.22.10 +:release_date: 2024-12-09 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v11.22.10/CHANGELOG.md +:include_path: ../include/6.x +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Elasticsearch provides near real-time search and analytics for all types of +data. The Elasticsearch output plugin can store both time series datasets (such +as logs, events, and metrics) and non-time series data in Elasticsearch. + +You can https://www.elastic.co/elasticsearch/[learn more about Elasticsearch] on +the website landing page or in the {ref}[Elasticsearch documentation]. + +.Compatibility Note +[NOTE] +===== +When connected to Elasticsearch 7.x, modern versions of this plugin +don't use the document-type when inserting documents, unless the user +explicitly sets <<{version}-plugins-{type}s-{plugin}-document_type>>. + +If you are using an earlier version of Logstash and wish to connect to +Elasticsearch 7.x, first upgrade Logstash to version 6.8 to ensure it +picks up changes to the Elasticsearch index template. + +If you are using a custom <<{version}-plugins-{type}s-{plugin}-template>>, +ensure your template uses the `_doc` document-type before +connecting to Elasticsearch 7.x. +===== + +[id="{version}-plugins-{type}s-{plugin}-serverless"] +==== {ls} to {es-serverless} + +You can use this plugin to send your {ls} data to {es-serverless}. +Some differences to note between {es-serverless} and self-managed {es}: + +* Use *API keys* to access {serverless-full} from {ls}. +Any user-based security settings in your <<{version}-plugins-outputs-elasticsearch,{es} output plugin>> configuration are ignored and may cause errors. +* {es-serverless} uses *data streams* and {ref}/data-stream-lifecycle.html[{dlm} ({dlm-init})] instead of {ilm} ({ilm-init}). +Any {ilm-init} settings in your <<{version}-plugins-outputs-elasticsearch,{es} output plugin>> configuration are ignored and may cause errors. 
+* *{ls} monitoring* is available through the https://github.com/elastic/integrations/blob/main/packages/logstash/_dev/build/docs/README.md[{ls} Integration] in {serverless-docs}/observability/what-is-observability-serverless[Elastic Observability] on {serverless-full}. + +.Known issue for {ls} to {es-serverless} +**** +The logstash-output-elasticsearch `hosts` setting on {serverless-short} defaults the port to 9200 when omitted. +Set the value to port :443 instead. +**** + +For more info on sending data from {ls} to {es-serverless}, check out the {serverless-docs}/elasticsearch/what-is-elasticsearch-serverless[{es-serverless} docs]. + +[id="{version}-plugins-{type}s-{plugin}-ess"] +==== Hosted {es} Service on Elastic Cloud + +{ess-leadin} + +==== Compatibility with the Elastic Common Schema (ECS) + +This plugin will persist events to Elasticsearch in the shape produced by +your pipeline, and _cannot_ be used to re-shape the event structure into a +shape that complies with ECS. To produce events that fully comply with ECS, +you will need to populate ECS-defined fields throughout your pipeline +definition. + +However, the Elasticsearch Index Templates it manages can be configured to +be ECS-compatible by setting <<{version}-plugins-{type}s-{plugin}-ecs_compatibility>>. +By having an ECS-compatible template in place, we can ensure that Elasticsearch +is prepared to create and index fields in a way that is compatible with ECS, +and will correctly reject events with fields that conflict and cannot be coerced. + +[id="{version}-plugins-{type}s-{plugin}-data-streams"] +==== Data streams + +The {es} output plugin can store both time series datasets (such +as logs, events, and metrics) and non-time series data in Elasticsearch. 
+ +Use the data stream options for indexing time series datasets (such +as logs, metrics, and events) into {es} and {es-serverless}: + +* <<{version}-plugins-{type}s-{plugin}-data_stream>> +* <<{version}-plugins-{type}s-{plugin}-data_stream_auto_routing>> +* <<{version}-plugins-{type}s-{plugin}-data_stream_dataset>> +* <<{version}-plugins-{type}s-{plugin}-data_stream_namespace>> +* <<{version}-plugins-{type}s-{plugin}-data_stream_sync_fields>> +* <<{version}-plugins-{type}s-{plugin}-data_stream_type>> + +IMPORTANT: <<{version}-plugins-{type}s-{plugin}-ecs_compatibility,ECS compatibility>> must be enabled (set to `v1` or `v8`) for data streams to work properly. + +[id="{version}-plugins-{type}s-{plugin}-ds-examples"] +===== Data stream configuration examples + +**Example: Basic default configuration** + +[source,sh] +----- +output { + elasticsearch { + hosts => "hostname" + data_stream => "true" + } +} +----- + +This example shows the minimal settings for processing data streams. Events +with `data_stream.*` fields are routed to the appropriate data streams. If the +fields are missing, routing defaults to `logs-generic-default`. + +**Example: Customize data stream name** + +[source,sh] +----- +output { + elasticsearch { + hosts => "hostname" + data_stream => "true" + data_stream_type => "metrics" + data_stream_dataset => "foo" + data_stream_namespace => "bar" + } +} +----- + + +==== Writing to different indices: best practices + +NOTE: You cannot use dynamic variable substitution when `ilm_enabled` is `true` +and when using `ilm_rollover_alias`. 
+ +If you're sending events to the same Elasticsearch cluster, but you're targeting different indices you can: + + * use different Elasticsearch outputs, each one with a different value for the `index` parameter + * use one Elasticsearch output and use the dynamic variable substitution for the `index` parameter + +Each Elasticsearch output is a new client connected to the cluster: + + * it has to initialize the client and connect to Elasticsearch (restart time is longer if you have more clients) + * it has an associated connection pool + +In order to minimize the number of open connections to Elasticsearch, maximize +the bulk size and reduce the number of "small" bulk requests (which could easily +fill up the queue), it is usually more efficient to have a single Elasticsearch +output. + +Example: +[source,ruby] + output { + elasticsearch { + index => "%{[some_field][sub_field]}-%{+YYYY.MM.dd}" + } + } + +**What to do in case there is no field in the event containing the destination index prefix?** + +You can use the `mutate` filter and conditionals to add a +{logstash-ref}/event-dependent-configuration.html#metadata[`[@metadata]` field] +to set the destination index for each event. The `[@metadata]` fields will not +be sent to Elasticsearch. + +Example: +[source,ruby] + filter { + if [log_type] in [ "test", "staging" ] { + mutate { add_field => { "[@metadata][target_index]" => "test-%{+YYYY.MM}" } } + } else if [log_type] == "production" { + mutate { add_field => { "[@metadata][target_index]" => "prod-%{+YYYY.MM.dd}" } } + } else { + mutate { add_field => { "[@metadata][target_index]" => "unknown-%{+YYYY}" } } + } + } + output { + elasticsearch { + index => "%{[@metadata][target_index]}" + } + } + + +==== Retry Policy + +The retry policy has changed significantly in the 8.1.1 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. 
The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP +request are handled differently than error codes for individual documents. + +HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely. + +The following document errors are handled as follows: + + * 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<{version}-plugins-{type}s-{plugin}-dlq-policy>> for more info. + * 409 errors (conflict) are logged as a warning and dropped. + +Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. +It is more performant for Elasticsearch to retry these exceptions than this plugin. + +[id="{version}-plugins-{type}s-{plugin}-dlq-policy"] +==== DLQ Policy + +Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately +mapping errors cannot be handled without human intervention and without looking +at the field that caused the mapping mismatch. If the DLQ is enabled, the +original events causing the mapping errors are stored in a file that can be +processed at a later time. Often times, the offending field can be removed and +re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error +happens, the problem is logged as a warning, and the event is dropped. See +{logstash-ref}/dead-letter-queues.html[dead-letter-queues] for more information about processing events in the DLQ. +The list of error codes accepted for DLQ could be customized with <<{version}-plugins-{type}s-{plugin}-dlq_custom_codes>> +but should be used only in motivated cases. + +[id="{version}-plugins-{type}s-{plugin}-ilm"] +==== {ilm-cap} ({ilm-init}) + +[NOTE] +-- +* The {ilm-cap} ({ilm-init}) feature does not apply for {es-serverless}. 
+Any {ilm-init} settings in your plugin configuration are ignored and may cause errors. +* The {ilm-init} feature requires plugin version `9.3.1` or higher. +* This feature requires an {es} instance of 6.6.0 or higher with at least a Basic license +-- + +{ls} can use {ref}/index-lifecycle-management.html[{ilm}] to automate the management of indices over time. + +The use of {ilm} is controlled by the `ilm_enabled` +setting. By default, this setting detects whether the Elasticsearch instance +supports {ilm-init}, and uses it if it is available. `ilm_enabled` can also be set to +`true` or `false` to override the automatic detection, or disable {ilm-init}. + +This will overwrite the index settings and adjust the {ls} template to write +the necessary settings for the template to support {ilm}, +including the index policy and rollover alias to be used. + +{ls} creates a rollover alias for the indices to be written to, +including a pattern for how the actual indices will be named, and unless an ILM +policy that already exists has been specified, a default policy will also be +created. The default policy is configured to rollover an index when it reaches +either 50 gigabytes in size, or is 30 days old, whichever happens first. + +The default rollover alias is called `logstash`, with a default pattern for the +rollover index of `{now/d}-00001`, which will name indices on the date that the +index is rolled over, followed by an incrementing number. Note that the pattern +must end with a dash and a number that will be incremented. + +See the {ref}/indices-rollover-index.html#_using_date_math_with_the_rollover_api[Rollover +API documentation] for more details on naming. + +The rollover alias, ilm pattern and policy can be modified. 
+ +See config below for an example: +[source,ruby] + output { + elasticsearch { + ilm_rollover_alias => "custom" + ilm_pattern => "000001" + ilm_policy => "custom_policy" + } + } + +[NOTE] +-- +* Custom ILM policies must already exist on the {es} cluster before they can be used. +* If the rollover alias or pattern is modified, the index template will need to be +overwritten as the settings `index.lifecycle.name` and +`index.lifecycle.rollover_alias` are automatically written to the template +* If the index property is supplied in the output definition, it will be overwritten by the rollover alias. +-- + +==== Batch Sizes + +This plugin attempts to send batches of events to the {ref}/docs-bulk.html[{es} +Bulk API] as a single request. However, if a batch exceeds 20MB we break it up +into multiple bulk requests. If a single document exceeds 20MB it is sent as a +single request. + +==== DNS Caching + +This plugin uses the JVM to lookup DNS entries and is subject to the value of +https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl], +a global setting for the JVM. + +As an example, to set your DNS TTL to 1 second you would set +the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`. + +Keep in mind that a connection with keepalive enabled will +not reevaluate its DNS value while the keepalive is in effect. + +==== HTTP Compression + +This plugin always reads compressed responses from {es}. +By default, it sends compressed bulk requests to {es}. + +If you are concerned about bandwidth, you can set a higher <<{version}-plugins-{type}s-{plugin}-compression_level>> to trade CPU capacity for a reduction in network IO. + +==== Authentication + +Authentication to a secure Elasticsearch cluster is possible using one of the +`user`/`password`, `cloud_auth` or `api_key` options. 
+ +[id="{version}-plugins-{type}s-{plugin}-autz"] +==== Authorization + +Authorization to a secure Elasticsearch cluster requires `read` permission at +index level and `monitoring` permissions at cluster level. The `monitoring` +permission at cluster level is necessary to perform periodic connectivity +checks. + +[id="{version}-plugins-{type}s-{plugin}-handling-non-utf-8"] +==== Handling non UTF-8 data + +This plugin transmits events to Elasticsearch using a JSON API, and therefore requires that all string values in events to be valid UTF-8. +When a string value on an event contains one or more byte sequences that are not valid in UTF-8, each offending byte sequence is replaced with the UTF-8 replacement character (`\uFFFD`). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Output Configuration Options + +This plugin supports the following configuration options plus the +<<{version}-plugins-{type}s-{plugin}-common-options>> and the <<{version}-plugins-{type}s-{plugin}-deprecated-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-api_key>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ca_trusted_fingerprint>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cloud_auth>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-cloud_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-compression_level>> |{logstash-ref}/configuration-file-structure.html#number[number], one of `[0 ~ 9]`|No +| <<{version}-plugins-{type}s-{plugin}-custom_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-data_stream>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["true", "false", "auto"]`|No +| <<{version}-plugins-{type}s-{plugin}-data_stream_auto_routing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-data_stream_dataset>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-data_stream_namespace>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-data_stream_sync_fields>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-data_stream_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| 
<<{version}-plugins-{type}s-{plugin}-dlq_custom_codes>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-dlq_on_failed_indexname_interpolation>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ecs_compatibility>> | {logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ilm_enabled>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["true", "false", "auto"]`|No +| <<{version}-plugins-{type}s-{plugin}-ilm_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ilm_policy>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ilm_rollover_alias>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-silence_errors_in_log>> 
|{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |list of {logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cipher_suites>> |list of {logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enabled>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_path>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_supported_protocols>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_path>> |{logstash-ref}/configuration-file-structure.html#path[path]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verification_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["full", "none"]`|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_api>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["auto", "legacy", "composable"]`|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + 
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `create` for data streams, and `index` for non-time series data. + +The Elasticsearch action to perform. Valid actions are: + +- `index`: indexes a document (an event from Logstash). +- `delete`: deletes a document by id (An id is required for this action) +- `create`: indexes a document, fails if a document by that id already exists in the index. +- `update`: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `doc_as_upsert` option. NOTE: This does not work and is not supported + in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! +- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}` + would use the foo field for the action. + If resolved action is not in [`index`, `delete`, `create`, `update`], the event will not be sent to {es}. + Instead the event will be sent to the pipeline's {logstash-ref}/dead-letter-queues.html[dead-letter-queue (DLQ)] (if enabled), or it will be logged and dropped. + +For more details on actions, check out the {ref}/docs-bulk.html[Elasticsearch bulk API documentation]. + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Authenticate using Elasticsearch API key. +Note that this option also requires SSL/TLS, which can be enabled by supplying a <<{version}-plugins-{type}s-{plugin}-cloud_id>>, a list of HTTPS <<{version}-plugins-{type}s-{plugin}-hosts>>, or by setting <<{version}-plugins-{type}s-{plugin}-ssl,`ssl_enabled => true`>>. 
+
+Format is `id:api_key` where `id` and `api_key` are as returned by the
+Elasticsearch {ref}/security-api-create-api-key.html[Create API key API].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * The default value for this setting is `/_bulk?filter_path=errors,items.*.error,items.*.status`
+
+HTTP Path to perform the _bulk requests to
+* This default bulk path is the concatenation of the value of `path` parameter and `/_bulk?filter_path=errors,items.*.error,items.*.status`
+* The `filter_path` query parameter is appended to the bulk path to reduce the payload between logstash and elasticsearch. However, if a custom `filter_path` query parameter is included in the `bulk_path` setting, then that value will be used.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ca_trusted_fingerprint"]
+===== `ca_trusted_fingerprint`
+
+* Value type is {logstash-ref}/configuration-file-structure.html#string[string], and must contain exactly 64 hexadecimal characters.
+* There is no default value for this setting.
+* Use of this option _requires_ Logstash 8.3+
+
+The SHA-256 fingerprint of an SSL Certificate Authority to trust, such as the autogenerated self-signed CA for an Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-cloud_auth"]
+===== `cloud_auth`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Cloud authentication string ("<cloud user>:<cloud password>" format) is an alternative
+for the `user`/`password` pair.
+
+For more details, check out the
+{logstash-ref}/connecting-to-cloud.html[Logstash-to-Cloud documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-cloud_id"]
+===== `cloud_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Cloud ID, from the Elastic Cloud web console. 
If set `hosts` should not be used. + +For more details, check out the +{logstash-ref}/connecting-to-cloud.html[Logstash-to-Cloud documentation]. + +[id="{version}-plugins-{type}s-{plugin}-compression_level"] +===== `compression_level` + + * Value can be any of: `0`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9` + * Default value is `1` + +The gzip compression level. Setting this value to `0` disables compression. +The compression level must be in the range of `1` (best speed) to `9` (best compression). + +Increasing the compression level will reduce the network usage but will increase the CPU usage. + +[id="{version}-plugins-{type}s-{plugin}-data_stream"] +===== `data_stream` + +* Value can be any of: `true`, `false` and `auto` +* Default is `false` in Logstash 7.x and `auto` starting in Logstash 8.0. + +Defines whether data will be indexed into an Elasticsearch data stream. +The other `data_stream_*` settings will be used only if this setting is enabled. + +Logstash handles the output as a data stream when the supplied configuration +is compatible with data streams and this value is set to `auto`. +Note that <<{version}-plugins-{type}s-{plugin}-ecs_compatibility,ECS compatibility>> must be enabled (set to `v1` or `v8`) for data streams to work properly. + +[id="{version}-plugins-{type}s-{plugin}-data_stream_auto_routing"] +===== `data_stream_auto_routing` + +* Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] +* Default value is `true`. + +Automatically routes events by deriving the data stream name using specific event +fields with the `%{[data_stream][type]}-%{[data_stream][dataset]}-%{[data_stream][namespace]}` format. + +If enabled, the `data_stream.*` event fields will take precedence over the +`data_stream_type`, `data_stream_dataset`, and `data_stream_namespace` settings, +but will fall back to them if any of the fields are missing from the event. 
+ +[id="{version}-plugins-{type}s-{plugin}-data_stream_dataset"] +===== `data_stream_dataset` + +* Value type is {logstash-ref}/configuration-file-structure.html#string[string] +* Default value is `generic`. + +The data stream dataset used to construct the data stream at index time. + +[id="{version}-plugins-{type}s-{plugin}-data_stream_namespace"] +===== `data_stream_namespace` + +* Value type is {logstash-ref}/configuration-file-structure.html#string[string] +* Default value is `default`. + +The data stream namespace used to construct the data stream at index time. + +[id="{version}-plugins-{type}s-{plugin}-data_stream_sync_fields"] +===== `data_stream_sync_fields` + +* Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] +* Default value is `true` + +Automatically adds and syncs the `data_stream.*` event fields if they are missing from the +event. This ensures that fields match the name of the data stream that is receiving events. + +NOTE: If existing `data_stream.*` event fields do not match the data stream name +and `data_stream_auto_routing` is disabled, the event fields will be +overwritten with a warning. + +[id="{version}-plugins-{type}s-{plugin}-data_stream_type"] +===== `data_stream_type` + +* Value type is {logstash-ref}/configuration-file-structure.html#string[string] +* Default value is `logs`. + +The data stream type used to construct the data stream at index time. +Currently, only `logs`, `metrics`, `synthetics` and `traces` are supported. + +[id="{version}-plugins-{type}s-{plugin}-dlq_custom_codes"] +===== `dlq_custom_codes` + +* Value type is {logstash-ref}/configuration-file-structure.html#number[number] +* Default value is `[]`. + +List single-action error codes from Elasticsearch's Bulk API that are considered valid to move the events into the dead letter queue. +This list is an addition to the ordinary error codes considered for this feature, 400 and 404. 
+It's considered a configuration error to re-use the same predefined codes for success, DLQ or conflict.
+The option accepts a list of natural numbers corresponding to HTTP error codes.
+
+[id="{version}-plugins-{type}s-{plugin}-dlq_on_failed_indexname_interpolation"]
+===== `dlq_on_failed_indexname_interpolation`
+
+* Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+* Default value is `true`.
+
+If enabled, failed index name interpolation events go into dead letter queue.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+ * This option is deprecated
+
+NOTE: This option is deprecated due to the
+https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal
+of types in Elasticsearch 6.0]. It will be removed in the next major version of
+Logstash.
+
+NOTE: This value is ignored and has no effect for Elasticsearch clusters `8.x`.
+
+This sets the document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here. 
+If you don't set a value for this option: + +- for elasticsearch clusters 8.x: no value will be used; +- for elasticsearch clusters 7.x: the value of '_doc' will be used; +- for elasticsearch clusters 6.x: the value of 'doc' will be used; +- for elasticsearch clusters 5.x and below: the event's 'type' field will be used, if the field is not present the value of 'doc' will be used. + +[id="{version}-plugins-{type}s-{plugin}-ecs_compatibility"] +===== `ecs_compatibility` + +* Value type is {logstash-ref}/configuration-file-structure.html#string[string] +* Supported values are: +** `disabled`: does not provide ECS-compatible templates +** `v1`,`v8`: Elastic Common Schema-compliant behavior +* Default value depends on which version of Logstash is running: +** When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default +** Otherwise, the default value is `disabled`. + +Controls this plugin's compatibility with the {ecs-ref}[Elastic Common Schema +(ECS)], including the installation of ECS-compatible index templates. The value +of this setting affects the _default_ values of: + +* <<{version}-plugins-{type}s-{plugin}-index>> +* <<{version}-plugins-{type}s-{plugin}-template_name>> +* <<{version}-plugins-{type}s-{plugin}-ilm_rollover_alias>> + +[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] +===== `failure_type_logging_whitelist` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +NOTE: Deprecated, refer to <<{version}-plugins-{type}s-{plugin}-silence_errors_in_log>>. + +[id="{version}-plugins-{type}s-{plugin}-custom_headers"] +===== `custom_headers` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Pass a set of key value pairs as the headers sent in each request to +an elasticsearch node. 
The headers will be used for any kind of request
+(_bulk request, template installation, health checks and sniffing).
+These custom headers will be overridden by settings like `compression_level`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down;
+the request is sent in the background to see if it has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance
+requests across the hosts specified in the `hosts` parameter. Remember the
+`http` protocol uses the {ref}/modules-http.html#modules-http[http] address (eg.
+9200, not 9300).
+
+Examples:
+
+    `"127.0.0.1"`
+    `["127.0.0.1:9200","127.0.0.2:9200"]`
+    `["http://127.0.0.1"]`
+    `["https://127.0.0.1:9200"]`
+    `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+
+Exclude {ref}/modules-node.html[dedicated master nodes] from the `hosts` list to
+prevent Logstash from sending bulk requests to the master nodes. This parameter
+should reference only data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means
+`#` should be put in as `%23` for instance. 
+ +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` +deprecated[11.17.0, Replaced by <<{version}-plugins-{type}s-{plugin}-compression_level>>] + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Setting `true` enables gzip compression level 1 on requests. + +This setting allows you to reduce this plugin's outbound network traffic by +compressing each bulk _request_ to {es}. + +NOTE: This output plugin reads compressed _responses_ from {es} regardless + of the value of this setting. + +[id="{version}-plugins-{type}s-{plugin}-ilm_enabled"] +===== `ilm_enabled` + + * Value can be any of: `true`, `false`, `auto` + * Default value is `auto` + +The default setting of `auto` will automatically enable +{ref}/index-lifecycle-management.html[Index Lifecycle Management], if the +Elasticsearch cluster is running Elasticsearch version `7.0.0` or higher with +the ILM feature enabled, and disable it otherwise. + +Setting this flag to `false` will disable the Index Lifecycle Management +feature, even if the Elasticsearch cluster supports ILM. +Setting this flag to `true` will enable Index Lifecycle Management feature, if +the Elasticsearch cluster supports it. This is required to enable Index +Lifecycle Management on a version of Elasticsearch earlier than version `7.0.0`. + +NOTE: This feature requires a Basic License or above to be installed on an +Elasticsearch cluster version 6.6.0 or later. + +[id="{version}-plugins-{type}s-{plugin}-ilm_pattern"] +===== `ilm_pattern` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `{now/d}-000001` + +Pattern used for generating indices managed by +{ref}/index-lifecycle-management.html[Index Lifecycle Management]. The value +specified in the pattern will be appended to the write alias, and incremented +automatically when a new index is created by ILM. 
+ +Date Math can be used when specifying an ilm pattern, see +{ref}/indices-rollover-index.html#_using_date_math_with_the_rollover_api[Rollover +API docs] for details. + +NOTE: Updating the pattern will require the index template to be rewritten. + +NOTE: The pattern must finish with a dash and a number that will be automatically +incremented when indices rollover. + +NOTE: The pattern is a 6-digit string padded by zeros, regardless of prior index name. +Example: 000001. See +{ref}/indices-rollover-index.html#rollover-index-api-path-params[Rollover path +parameters API docs] for details. + +[id="{version}-plugins-{type}s-{plugin}-ilm_policy"] +===== `ilm_policy` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `logstash-policy` + +Modify this setting to use a custom Index Lifecycle Management policy, rather +than the default. If this value is not set, the default policy will be +automatically installed into Elasticsearch + +NOTE: If this setting is specified, the policy must already exist in Elasticsearch +cluster. + +[id="{version}-plugins-{type}s-{plugin}-ilm_rollover_alias"] +===== `ilm_rollover_alias` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value depends on whether <<{version}-plugins-{type}s-{plugin}-ecs_compatibility>> is enabled: + ** ECS Compatibility disabled: `logstash` + ** ECS Compatibility enabled: `ecs-logstash` + +The rollover alias is the alias where indices managed using Index Lifecycle +Management will be written to. + +NOTE: If both `index` and `ilm_rollover_alias` are specified, +`ilm_rollover_alias` takes precedence. + +NOTE: Updating the rollover alias will require the index template to be +rewritten. + +NOTE: `ilm_rollover_alias` does NOT support dynamic variable substitution as +`index` does. 
+ +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value depends on whether <<{version}-plugins-{type}s-{plugin}-ecs_compatibility>> is enabled: + ** ECS Compatibility disabled: `"logstash-%{+yyyy.MM.dd}"` + ** ECS Compatibility enabled: `"ecs-logstash-%{+yyyy.MM.dd}"` + +The indexing target to write events to. +Can point to an {ref}/index-mgmt.html[index], {ref}/aliases.html[alias], or {ref}/data-streams.html[data stream]. +This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +Logstash uses +http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[Joda +formats] and the `@timestamp` field of each event is being used as source for the date. + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` for non-time series data, and `false` for data streams. + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name <<{version}-plugins-{type}s-{plugin}-template_name>> +does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. 
creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run +Elasticsearch behind a proxy that remaps the root path for the Elasticsearch +HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value. + +Set which ingest pipeline you wish to execute for an event. 
You can also use +event dependent configuration here like `pipeline => "%{[@metadata][pipeline]}"`. +The pipeline parameter won't be set if the value resolves to empty string (""). + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This setting accepts only URI arguments to prevent leaking credentials. +An empty string is treated as if proxy was not set. This is useful when using +environment variables e.g. `proxy => '${LS_PROXY:}'`. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. 
+Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `2` + +Set initial interval in seconds between bulk retries. Doubled on each retry up +to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document. + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Set script name for scripted update mode + +Example: +[source,ruby] + output { + elasticsearch { + script => "ctx._source.message = params.event.get('message')" + } + } + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"painless"` + +Set the language of the used script. 
+When using indexed (stored) scripts on Elasticsearch 6.0 and higher, you must set +this parameter to `""` (empty string). + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticsearch's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-silence_errors_in_log"] +===== `silence_errors_in_log` + +* Value type is {logstash-ref}/configuration-file-structure.html#array[array] +* Default value is `[]` + +Defines the list of Elasticsearch errors that you don't want to log. +A useful example is when you want to skip all 409 errors +which are `version_conflict_engine_exception`. + +[source,ruby] + output { + elasticsearch { + silence_errors_in_log => ["version_conflict_engine_exception"] + } + } + +NOTE: Deprecates <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>>. + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them +to the hosts list. 
+For Elasticsearch 5.x and 6.x any nodes with `http.enabled` (on by default) will +be added to the hosts list, excluding master-only nodes. + +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file. + +NOTE: This setting can be used only if <<{version}-plugins-{type}s-{plugin}-ssl_key>> is set. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] +===== `ssl_certificate_authorities` + + * Value type is a list of {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting + +The .cer or .pem files to validate the server's certificate. + +NOTE: You cannot use this setting and <<{version}-plugins-{type}s-{plugin}-ssl_truststore_path>> at the same time. 
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_cipher_suites"] +===== `ssl_cipher_suites` + * Value type is a list of {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting + +The list of cipher suites to use, listed by priorities. +Supported cipher suites vary depending on the Java and protocol versions. + +[id="{version}-plugins-{type}s-{plugin}-ssl_enabled"] +===== `ssl_enabled` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. +Leaving this unspecified will use whatever scheme is specified in the URLs listed in <<{version}-plugins-{type}s-{plugin}-hosts>> or extracted from the <<{version}-plugins-{type}s-{plugin}-cloud_id>>. +If no explicit protocol is specified plain HTTP will be used. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL key to use. +This key must be in the PKCS8 format and PEM encoded. +You can use the https://www.openssl.org/docs/man1.1.1/man1/openssl-pkcs8.html[openssl pkcs8] command to complete the conversion. +For example, the command to convert a PEM encoded PKCS1 private key to a PEM encoded, non-encrypted PKCS8 key is: + +[source,sh] +----- +openssl pkcs8 -inform PEM -in path/to/logstash.key -topk8 -nocrypt -outform PEM -out path/to/logstash.pkcs8.key +----- + +NOTE: This setting can be used only if <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> is set. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. 
+ +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_path"] +===== `ssl_keystore_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either `.jks` or `.p12` + +NOTE: You cannot use this setting and <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> at the same time. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value can be any of: `jks`, `pkcs12` + * If not provided, the value will be inferred from the keystore filename. + +The format of the keystore file. It must be either `jks` or `pkcs12`. + +[id="{version}-plugins-{type}s-{plugin}-ssl_supported_protocols"] +===== `ssl_supported_protocols` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'` + * Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. + `'TLSv1.1'` is not considered secure and is only provided for legacy applications. + +List of allowed SSL/TLS versions to use when establishing a connection to the Elasticsearch cluster. + +For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the +`LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash. + +NOTE: If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, +the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in +the *$JDK_HOME/conf/security/java.security* configuration file. That is, `TLSv1.1` needs to be removed from the list. 
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_path"] +===== `ssl_truststore_path` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +The truststore to validate the server's certificate. +It can be either `.jks` or `.p12`. + +NOTE: You cannot use this setting and <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> at the same time. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + +* Value can be any of: `jks`, `pkcs12` +* If not provided, the value will be inferred from the truststore filename. + +The format of the truststore file. It must be either `jks` or `pkcs12`. + +[id="{version}-plugins-{type}s-{plugin}-ssl_verification_mode"] +===== `ssl_verification_mode` + + * Value can be any of: `full`, `none` + * Default value is `full` + +Defines how to verify the certificates presented by another party in the TLS connection: + +`full` validates that the server certificate has an issue date that’s within +the not_before and not_after dates; chains to a trusted Certificate Authority (CA), and +has a hostname or IP address that matches the names within the certificate. + +`none` performs no certificate validation. + +WARNING: Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. 
+ +You can set the path to your own template here, if you so desire. +If not set, the included template will be used. + +[id="{version}-plugins-{type}s-{plugin}-template_api"] +===== `template_api` + + * Value can be any of: `auto`, `legacy`, `composable` + * Default value is `auto` + +The default setting of `auto` will use +{ref}/index-templates.html[index template API] to create index template, if the +Elasticsearch cluster is running Elasticsearch version `8.0.0` or higher, +and use {ref}/indices-templates-v1.html[legacy template API] otherwise. + +Setting this flag to `legacy` will use legacy template API to create index template. +Setting this flag to `composable` will use index template API to create index template. + +NOTE: The format of template provided to <<{version}-plugins-{type}s-{plugin}-template>> needs to match the template API being used. + +[id="{version}-plugins-{type}s-{plugin}-template_name"] +===== `template_name` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value depends on whether <<{version}-plugins-{type}s-{plugin}-ecs_compatibility>> is enabled: + ** ECS Compatibility disabled: `logstash` + ** ECS Compatibility enabled: `ecs-logstash` + + +This configuration option defines how the template is named inside Elasticsearch. +Note that if you have used the template management features and subsequently +change this, you will need to prune the old template manually, e.g. + +`curl -XDELETE ` + +where `OldTemplateName` is whatever the former setting was. + +[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] +===== `template_overwrite` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +The template_overwrite option will always overwrite the indicated template +in Elasticsearch with either the one indicated by template or the included one. +This option is set to false by default. 
If you always want to stay up to date +with the template provided by Logstash, this option could be very useful to you. +Likewise, if you have your own template file managed by puppet, for example, and +you wanted to be able to update it regularly, this option could help there as well. + +Please note that if you are using your own customized version of the Logstash +template (logstash), setting this to true will make Logstash to overwrite +the "logstash" template (i.e. removing all customized settings) + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `60` + +Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If +a timeout occurs, the request will be retried. + +[id="{version}-plugins-{type}s-{plugin}-upsert"] +===== `upsert` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `""` + +Set upsert content for update mode. +Create a new document with this parameter as json string if `document_id` doesn't exists + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Username to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `10000` + +How long to wait before checking for a stale connection to determine if a keepalive request is needed. +Consider setting this value lower than the default, possibly to 0, if you get connection errors regularly. + +This client is based on Apache Commons. 
Here's how the +https://hc.apache.org/httpcomponents-client-4.5.x/current/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[Apache +Commons documentation] describes this option: "Defines period of inactivity in +milliseconds after which persistent connections must be re-validated prior to +being leased to the consumer. Non-positive value passed to this method disables +connection validation. This check helps detect connections that have become +stale (half-closed) while kept inactive in the pool." + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The version to use for indexing. Use sprintf syntax like `%{my_version}` to use +a field value here. See the +https://www.elastic.co/blog/elasticsearch-versioning-support[versioning support +blog] for more information. + +[id="{version}-plugins-{type}s-{plugin}-version_type"] +===== `version_type` + + * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force` + * There is no default value for this setting. + +The version_type to use for indexing. See the +https://www.elastic.co/blog/elasticsearch-versioning-support[versioning support +blog] and {ref}/docs-index_.html#_version_types[Version types] in the +Elasticsearch documentation. + +[id="{version}-plugins-{type}s-{plugin}-deprecated-options"] +==== Elasticsearch Output Deprecated Configuration Options + +This plugin supports the following deprecated configurations. + +WARNING: Deprecated options are subject to removal in future releases. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting|Input type|Replaced by +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|<<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|<<{version}-plugins-{type}s-{plugin}-ssl_keystore_path>> +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|<<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|<<{version}-plugins-{type}s-{plugin}-ssl_enabled>> +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|<<{version}-plugins-{type}s-{plugin}-ssl_verification_mode>> +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|<<{version}-plugins-{type}s-{plugin}-ssl_truststore_path>> +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|<<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> +|======================================================================= + + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` +deprecated[11.14.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>>] + +* Value type is a list of {logstash-ref}/configuration-file-structure.html#path[path] +* There is no default value for this setting. + +The .cer or .pem file to validate the server's certificate. 
+ +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` +deprecated[11.14.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_keystore_path>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#path[path] +* There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +NOTE: You cannot use this setting and <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> at the same time. + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` +deprecated[11.14.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#password[password] +* There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` +deprecated[11.14.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_enabled>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] +* There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. +Leaving this unspecified will use whatever scheme is specified in the URLs listed in <<{version}-plugins-{type}s-{plugin}-hosts>> or extracted from the <<{version}-plugins-{type}s-{plugin}-cloud_id>>. +If no explicit protocol is specified plain HTTP will be used. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` +deprecated[11.14.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_verification_mode>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] +* Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. 
+For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` +deprecated[11.14.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_truststore_path>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#path[path] +* There is no default value for this setting. + +The truststore to validate the server's certificate. +It can be either `.jks` or `.p12`. +Use either `:truststore` or `:cacert`. + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` +deprecated[11.14.0, Replaced by <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>>] + +* Value type is {logstash-ref}/configuration-file-structure.html#password[password] +* There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] + +:no_codec!: