From b87ac8a55cc5b359decf32e03d9cac285a2d182a Mon Sep 17 00:00:00 2001 From: logstashmachine <43502315+logstashmachine@users.noreply.github.com> Date: Mon, 20 Oct 2025 17:02:03 +0000 Subject: [PATCH] updated docs for 9.2 --- .../filters/elastic_integration.asciidoc | 21 ++- docs/plugins/filters/elasticsearch.asciidoc | 146 ++++++++++++++++-- docs/plugins/filters/jdbc_static.asciidoc | 8 +- docs/plugins/filters/jdbc_streaming.asciidoc | 8 +- docs/plugins/filters/translate.asciidoc | 33 +++- docs/plugins/filters/xml.asciidoc | 8 +- docs/plugins/inputs/azure_event_hubs.asciidoc | 82 +++++----- docs/plugins/inputs/beats.asciidoc | 8 +- docs/plugins/inputs/elastic_agent.asciidoc | 8 +- docs/plugins/inputs/elasticsearch.asciidoc | 8 +- docs/plugins/inputs/http.asciidoc | 8 +- docs/plugins/inputs/jdbc.asciidoc | 8 +- docs/plugins/inputs/jms.asciidoc | 8 +- docs/plugins/inputs/kafka.asciidoc | 8 +- docs/plugins/inputs/snmp.asciidoc | 8 +- docs/plugins/inputs/snmptrap.asciidoc | 17 +- docs/plugins/inputs/tcp.asciidoc | 8 +- docs/plugins/integrations/jdbc.asciidoc | 8 +- docs/plugins/integrations/kafka.asciidoc | 8 +- docs/plugins/integrations/snmp.asciidoc | 8 +- docs/plugins/outputs/elasticsearch.asciidoc | 30 ++-- docs/plugins/outputs/kafka.asciidoc | 8 +- 22 files changed, 311 insertions(+), 146 deletions(-) diff --git a/docs/plugins/filters/elastic_integration.asciidoc b/docs/plugins/filters/elastic_integration.asciidoc index b042acaf8..6f3de6a85 100644 --- a/docs/plugins/filters/elastic_integration.asciidoc +++ b/docs/plugins/filters/elastic_integration.asciidoc @@ -5,10 +5,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -:version: v9.0.0 -:release_date: 2025-04-28 -:changelog_url: https://github.com/elastic/logstash-filter-elastic_integration/blob/v9.0.0/CHANGELOG.md -include_path: ../include +:version: v9.2.0 +:release_date: 2025-10-02 +:changelog_url: https://github.com/elastic/logstash-filter-elastic_integration/blob/v9.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// @@ -19,7 +19,7 @@ END - GENERATED VARIABLES, DO NOT EDIT! === {elastic-integration-name} filter plugin -include::{include_path}/plugin_header.asciidoc[] +include::{include_path}/plugin_header-nonstandard.asciidoc[] .Elastic Enterprise License **** @@ -361,6 +361,7 @@ This plugin supports the following configuration options plus the <> |<>|No | <> | <>|No | <> | <>|No +| <> | <>|No | <> | <>|No | <> |<>|No | <> | <>|No @@ -495,6 +496,16 @@ A password when using HTTP Basic Authentication to connect to {es}. * When present, the event's initial pipeline will _not_ be auto-detected from the event's data stream fields. * Value may be a {logstash-ref}/event-dependent-configuration.html#sprintf[sprintf-style] template; if any referenced fields cannot be resolved the event will not be routed to an ingest pipeline. +[id="plugins-{type}s-{plugin}-proxy"] +===== `proxy` + +* Value type is <> +* There is no default value for this setting. + +Address of the HTTP forward proxy used to connect to the {es} cluster. +An empty string is treated as if proxy was not set. +Environment variables may be used to set this value, e.g. `proxy => '${LS_PROXY:}'`. 
+ [id="plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` diff --git a/docs/plugins/filters/elasticsearch.asciidoc b/docs/plugins/filters/elasticsearch.asciidoc index 03c9366e9..c6ae7064c 100644 --- a/docs/plugins/filters/elasticsearch.asciidoc +++ b/docs/plugins/filters/elasticsearch.asciidoc @@ -5,10 +5,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v4.2.0 -:release_date: 2025-05-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v4.2.0/CHANGELOG.md -include_path: ../include +:version: v4.3.1 +:release_date: 2025-09-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v4.3.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// @@ -55,7 +55,7 @@ if [type] == "end" { The example below reproduces the above example but utilises the query_template. This query_template represents a full Elasticsearch query DSL and supports the -standard Logstash field substitution syntax. The example below issues +standard {ls} field substitution syntax. The example below issues the same query as the first example but uses the template shown. [source,ruby] @@ -119,6 +119,110 @@ Authentication to a secure Elasticsearch cluster is possible using _one_ of the Authorization to a secure Elasticsearch cluster requires `read` permission at index level and `monitoring` permissions at cluster level. The `monitoring` permission at cluster level is necessary to perform periodic connectivity checks. +[id="plugins-{type}s-{plugin}-esql"] +==== {esql} support + +.Technical Preview +**** +The {esql} feature that allows using ES|QL queries with this plugin is in Technical Preview. 
+Configuration options and implementation details are subject to change in minor releases without being preceded by deprecation warnings. +**** + +{es} Query Language ({esql}) provides a SQL-like interface for querying your {es} data. + +To use {esql}, this plugin needs to be installed in {ls} 8.17.4 or newer, and must be connected to {es} 8.11 or newer. + +To configure an {esql} query in the plugin, set your {esql} query in the `query` parameter. + +IMPORTANT: We recommend understanding {ref}/esql-limitations.html[{esql} current limitations] before using it in production environments. + +The following is a basic {esql} query that sets the food name on the transaction event based on the upstream event's food ID: +[source, ruby] + filter { + elasticsearch { + hosts => [ 'https://..'] + api_key => '....' + query => ' + FROM food-index + | WHERE id == ?food_id + ' + query_params => { + "food_id" => "[food][id]" + } + } + } + +Set `config.support_escapes: true` in `logstash.yml` if you need to escape special chars in the query. + +In the result event, the plugin sets the total result size in the `[@metadata][total_values]` field. + +[id="plugins-{type}s-{plugin}-esql-event-mapping"] +===== Mapping {esql} result to {ls} event +{esql} returns query results in a structured tabular format, where data is organized into _columns_ (fields) and _values_ (entries). +The plugin maps each value entry to an event, populating corresponding fields. +For example, a query might produce a table like: + +[cols="2,1,1,1,2",options="header"] +|=== +|`timestamp` |`user_id` | `action` | `status.code` | `status.desc` + +|2025-04-10T12:00:00 |123 |login |200 | Success +|2025-04-10T12:05:00 |456 |purchase |403 | Forbidden (unauthorized user) +|=== + +For this case, the plugin creates two JSON-like objects, as shown below, and places them into the `target` field of the event if `target` is defined. +If `target` is not defined, the plugin places _only_ the first result at the root of the event.
+[source, json] +[ + { + "timestamp": "2025-04-10T12:00:00", + "user_id": 123, + "action": "login", + "status": { + "code": 200, + "desc": "Success" + } + }, + { + "timestamp": "2025-04-10T12:05:00", + "user_id": 456, + "action": "purchase", + "status": { + "code": 403, + "desc": "Forbidden (unauthorized user)" + } + } +] + +NOTE: If your index has a mapping with sub-objects where `status.code` and `status.desc` are actually dotted fields, they appear in {ls} events as a nested structure. + +[id="plugins-{type}s-{plugin}-esql-multifields"] +===== Conflict on multi-fields + +An {esql} query fetches all parent fields and sub-fields if your {es} index has https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/multi-fields[multi-fields] or https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/subobjects[subobjects]. +Since {ls} events cannot contain a parent field's concrete value and sub-field values together, the plugin ignores sub-fields with a warning and includes the parent. +We recommend using the `RENAME` (or `DROP` to avoid the warning) keyword in your {esql} query to explicitly rename the fields to include sub-fields into the event. + +This is a common occurrence if your template or mapping follows the pattern of always indexing strings as "text" (`field`) + " keyword" (`field.keyword`) multi-field. +In this case it's recommended to do `KEEP field` if the string is identical and there is only one subfield as the engine will optimize and retrieve the keyword, otherwise you can do `KEEP field.keyword | RENAME field.keyword as field`. + +To illustrate the situation with an example, assuming your mapping has a `time` field with `time.min` and `time.max` sub-fields as follows: +[source, ruby] + "properties": { + "time": { "type": "long" }, + "time.min": { "type": "long" }, + "time.max": { "type": "long" } + } + +The {esql} result will contain all three fields but the plugin cannot map them into the {ls} event.
+To avoid this, you can use the `RENAME` keyword to rename the `time` parent field to get all three fields with unique names. +[source, ruby] + ... + query => 'FROM my-index | RENAME time AS time.current' + ... + +For a comprehensive ES|QL syntax reference and best practices, see the https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-syntax.html[{esql} documentation]. + [id="plugins-{type}s-{plugin}-options"] ==== Elasticsearch Filter Configuration Options @@ -144,6 +248,8 @@ NOTE: As of version `4.0.0` of this plugin, a number of previously deprecated se | <> |<>|No | <> |<>|No | <> |<>|No +| <> |<>, one of `["dsl", "esql"]`|No +| <> |<> or <>|No | <> |<>|No | <> |<>|No | <> |<>|No @@ -340,11 +446,30 @@ environment variables e.g. `proxy => '${LS_PROXY:}'`. * Value type is <> * There is no default value for this setting. -Elasticsearch query string. More information is available in the -{ref}/query-dsl-query-string-query.html#query-string-syntax[Elasticsearch query -string documentation]. -Use either `query` or `query_template`. +The query to be executed. +The accepted query shape is a DSL query string or ES|QL. +For the DSL query string, use either `query` or `query_template`. +Read the {ref}/query-dsl-query-string-query.html[{es} query +string documentation] or {ref}/esql.html[{es} ES|QL documentation] for more information. + +[id="plugins-{type}s-{plugin}-query_type"] +===== `query_type` + +* Value can be `dsl` or `esql` +* Default value is `dsl` + +Defines the <> shape. +When `dsl`, the query shape must be a valid {es} JSON-style string. +When `esql`, the query shape must be a valid {esql} string and `index`, `query_template` and `sort` parameters are not allowed. + +[id="plugins-{type}s-{plugin}-query_params"] +===== `query_params` + +* The value type is <> or <>. When an array is provided, the array elements are pairs of `key` and `value`. +* There is no default value for this setting. +Named parameters in {esql} to send to {es} together with <>.
+Visit {ref}/esql-rest.html#esql-rest-params[passing parameters to query page] for more information. [id="plugins-{type}s-{plugin}-query_template"] ===== `query_template` @@ -541,8 +666,9 @@ Tags the event on failure to look up previous log event information. This can be Define the target field for placing the result data. If this setting is omitted, the target will be the root (top level) of the event. +It is highly recommended to set when using `query_type=>'esql'` to set all query results into the event. -The destination fields specified in <>, <>, and <> are relative to this target. +When `query_type=>'dsl'`, the destination fields specified in <>, <>, and <> are relative to this target. For example, if you want the data to be put in the `operation` field: [source,ruby] diff --git a/docs/plugins/filters/jdbc_static.asciidoc b/docs/plugins/filters/jdbc_static.asciidoc index a60922de2..cb778ff6c 100644 --- a/docs/plugins/filters/jdbc_static.asciidoc +++ b/docs/plugins/filters/jdbc_static.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v5.6.0 -:release_date: 2025-05-30 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.0/CHANGELOG.md -:include_path: ../include +:version: v5.6.1 +:release_date: 2025-09-30 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// diff --git a/docs/plugins/filters/jdbc_streaming.asciidoc b/docs/plugins/filters/jdbc_streaming.asciidoc index bc86d638d..523ea625b 100644 --- a/docs/plugins/filters/jdbc_streaming.asciidoc +++ b/docs/plugins/filters/jdbc_streaming.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v5.6.0 -:release_date: 2025-05-30 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.0/CHANGELOG.md -:include_path: ../include +:version: v5.6.1 +:release_date: 2025-09-30 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/filters/translate.asciidoc b/docs/plugins/filters/translate.asciidoc index 59c7deacf..1cbc2d89d 100644 --- a/docs/plugins/filters/translate.asciidoc +++ b/docs/plugins/filters/translate.asciidoc @@ -5,10 +5,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v3.4.2 -:release_date: 2023-06-14 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.4.2/CHANGELOG.md -:include_path: ../include +:version: v3.5.0 +:release_date: 2025-08-04 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.5.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// @@ -25,12 +25,12 @@ A general search and replace tool that uses a configured hash and/or a file to determine replacement values. Currently supported are YAML, JSON, and CSV files. 
Each dictionary item is a key value pair. -You can specify dictionary entries in one of two ways: +You can specify dictionary entries in one of two ways: * The `dictionary` configuration item can contain a hash representing -the mapping. +the mapping. * An external file (readable by logstash) may be specified in the -`dictionary_path` configuration item. +`dictionary_path` configuration item. These two methods may not be used in conjunction; it will produce an error. @@ -110,6 +110,7 @@ This plugin supports the following configuration options plus the <> |<>|No | <> |<>|No | <> |<>|No +| <> |<>, one of `["one_shot", "streaming"]`|No |======================================================================= Also see <> for a list of options supported by all @@ -158,7 +159,7 @@ NOTE: It is an error to specify both `dictionary` and `dictionary_path`. * There is no default value for this setting. The full path of the external dictionary file. The format of the table should be -a standard YAML, JSON, or CSV. +a standard YAML, JSON, or CSV. Specify any integer-based keys in quotes. The value taken from the event's `source` setting is converted to a string. The lookup dictionary keys must also @@ -433,5 +434,21 @@ the filter will succeed. This will clobber the old value of the source field! The max amount of code points in the YAML file in `dictionary_path`. Please be aware that byte limit depends on the encoding. This setting is effective for YAML file only. YAML over the limit throws exception. +[id="plugins-{type}s-{plugin}-yaml_load_strategy"] +===== `yaml_load_strategy` + +* Value can be any of: `one_shot`, `streaming` +* Default value is `one_shot` + +How to load and parse the YAML file. This setting defaults to `one_shot`, which loads the entire +YAML file into the parser in one go, emitting the final dictionary from the fully parsed YAML document. 
+ +Setting to `streaming` will instead instruct the parser to emit one "YAML element" at a time, constructing the dictionary +during parsing. This mode drastically reduces the amount of memory required to load or refresh the dictionary and it is also faster. + +Due to underlying implementation differences this mode only supports basic types such as Arrays, Objects, Strings, numbers and booleans, and does not support tags. + +If you have a lot of translate filters with large YAML documents consider changing this setting to `streaming` instead. + [id="plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/plugins/filters/xml.asciidoc b/docs/plugins/filters/xml.asciidoc index b5d32598d..74f41d059 100644 --- a/docs/plugins/filters/xml.asciidoc +++ b/docs/plugins/filters/xml.asciidoc @@ -5,10 +5,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v4.3.1 -:release_date: 2025-04-22 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.3.1/CHANGELOG.md -:include_path: ../include +:version: v4.3.2 +:release_date: 2025-07-24 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.3.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/azure_event_hubs.asciidoc b/docs/plugins/inputs/azure_event_hubs.asciidoc index 7b1ac3d5d..d7c969cea 100644 --- a/docs/plugins/inputs/azure_event_hubs.asciidoc +++ b/docs/plugins/inputs/azure_event_hubs.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -:version: v1.5.1 -:release_date: 2025-01-03 -:changelog_url: https://github.com/logstash-plugins/logstash-input-azure_event_hubs/blob/v1.5.1/CHANGELOG.md -:include_path: ../include +:version: v1.5.2 +:release_date: 2025-07-11 +:changelog_url: https://github.com/logstash-plugins/logstash-input-azure_event_hubs/blob/v1.5.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// @@ -28,11 +28,11 @@ highly scalable data streaming platform and event ingestion service. Event producers send events to the Azure Event Hub, and this plugin consumes those events for use with Logstash. -Many Azure services integrate with the Azure Event Hubs. +Many Azure services integrate with the Azure Event Hubs. https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-overview-azure-monitor[Azure -Monitor], for example, integrates with Azure Event Hubs to provide infrastructure metrics. +Monitor], for example, integrates with Azure Event Hubs to provide infrastructure metrics. -IMPORTANT: This plugin requires outbound connections to ports `tcp/443`, `tcp/9093`, `tcp/5671`, and `tcp/5672`, +IMPORTANT: This plugin requires outbound connections to ports `tcp/443`, `tcp/9093`, `tcp/5671`, and `tcp/5672`, as noted in the https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-faq#what-ports-do-i-need-to-open-on-the-firewall[Microsoft Event Hub documentation]. ===== Event Hub connection string @@ -52,7 +52,7 @@ Endpoint=sb://logstash.servicebus.windows.net/;SharedAccessKeyName=activity-log- ===== Blob Storage and connection string https://azure.microsoft.com/en-us/services/storage/blobs[Azure Blob Storage -account] is an essential part of Azure-to-Logstash configuration. +account] is an essential part of Azure-to-Logstash configuration. 
A Blob Storage account is a central location that enables multiple instances of Logstash to work together to process events. It records the offset (location) of processed events. On restart, Logstash resumes processing @@ -62,7 +62,7 @@ Configuration notes: * A Blob Storage account is highly recommended for use with this plugin, and is likely required for production servers. -* The `storage_connection` option passes the blob storage connection string. +* The `storage_connection` option passes the blob storage connection string. * Configure all Logstash instances to use the same `storage_connection` to get the benefits of shared processing. @@ -73,7 +73,7 @@ Sample Blob Storage connection string: DefaultEndpointsProtocol=https;AccountName=logstash;AccountKey=ETOPnkd/hDAWidkEpPZDiXffQPku/SZdXhPSLnfqdRTalssdEuPkZwIcouzXjCLb/xPZjzhmHfwRCGo0SBSw==;EndpointSuffix=core.windows.net ---- -Find the connection string to Blob Storage here: +Find the connection string to Blob Storage here: https://portal.azure.com[Azure Portal]`-> Blob Storage account -> Access keys`. [id="plugins-{type}s-{plugin}-best-practices"] @@ -83,8 +83,8 @@ Here are some guidelines to help you avoid data conflicts that can cause lost events. * <> -* <> -* <> +* <> +* <> [id="plugins-{type}s-{plugin}-bp-group"] ====== Create a Logstash consumer group @@ -99,7 +99,7 @@ work together for processing events. The offsets (position) of the Event Hubs are stored in the configured Azure Blob store. The Azure Blob store uses paths like a file system to store the offsets. If the paths between multiple Event Hubs overlap, then the offsets may be stored -incorrectly. +incorrectly. To avoid duplicate file paths, use the advanced configuration model and make sure that at least one of these options is different per Event Hub: @@ -127,7 +127,7 @@ independently to each. 
**Example: Single event hub** -If you’re collecting activity logs from one event hub instance, +If you’re collecting activity logs from one event hub instance, then only 2 threads are required. * Event hubs = 1 @@ -137,12 +137,12 @@ then only 2 threads are required. If you are collecting activity logs from more than event hub instance, then at least 1 thread per event hub is required. -* Event hubs = 4 -* Minimum threads = 5 (4 Event Hubs + 1) +* Event hubs = 4 +* Minimum threads = 5 (4 Event Hubs + 1) When you are using multiple partitions per event hub, you may want to assign more threads. A good base level is (1 + `number of event hubs * number of partitions`). -That is, one thread for each partition across all event hubs. +That is, one thread for each partition across all event hubs. [id="plugins-{type}s-{plugin}-eh_config_models"] ==== Configuration models @@ -181,9 +181,9 @@ The advanced configuration model accommodates deployments where different Event Hubs require different configurations. Options can be configured per Event Hub. You provide a list of Event Hub names through the `event_hubs` option. Under each name, specify the configuration for that Event Hub. Options can be defined -globally or expressed per Event Hub. +globally or expressed per Event Hub. -If the same configuration option appears in both the global and `event_hub` +If the same configuration option appears in both the global and `event_hub` sections, the more specific (event_hub) setting takes precedence. NOTE: Advanced configuration is not necessary or recommended for most use cases. @@ -214,7 +214,7 @@ input { In this example, `storage_connection` and `decorate_events` are applied globally. The two Event Hubs each have their own settings for `consumer_groups` -and `initial_position`. +and `initial_position`. [id="plugins-{type}s-{plugin}-options"] ==== Azure Event Hubs Configuration Options @@ -253,7 +253,7 @@ configuration uses `event_hubs` and `event_hub_connection` (singular). 
* Valid entries are `basic` or `advanced` * Default value is `basic` -Sets configuration to either <> or <>. +Sets configuration to either <> or <>. [source,ruby] ---- @@ -267,10 +267,10 @@ azure_event_hubs { * Value type is <> * No default value * Ignored for basic configuration -* Required for advanced configuration +* Required for advanced configuration Defines the Event Hubs to be read. An array of hashes where each entry is a -hash of the Event Hub name and its configuration options. +hash of the Event Hub name and its configuration options. [source,ruby] ---- @@ -297,7 +297,7 @@ azure_event_hubs { * Required for basic configuration List of connection strings that identifies the Event Hubs to be read. Connection -strings include the EntityPath for the Event Hub. +strings include the EntityPath for the Event Hub. The `event_hub_connections` option is defined per Event Hub. All other configuration options are shared among Event Hubs. @@ -337,12 +337,12 @@ azure_event_hubs { * Value type is <> * Default value is `5` seconds * Set to `0` to disable. - + Interval in seconds to write checkpoints during batch processing. Checkpoints tell Logstash where to resume processing after a restart. Checkpoints are -automatically written at the end of each batch, regardless of this setting. +automatically written at the end of each batch, regardless of this setting. -Writing checkpoints too frequently can slow down processing unnecessarily. +Writing checkpoints too frequently can slow down processing unnecessarily. [source,ruby] ---- @@ -355,7 +355,7 @@ azure_event_hubs { [id="plugins-{type}s-{plugin}-consumer_group"] ===== `consumer_group` * Value type is <> -* Default value is `$Default` +* Default value is `$Default` Consumer group used to read the Event Hub(s). Create a consumer group specifically for Logstash. 
Then ensure that all instances of Logstash use that @@ -376,7 +376,7 @@ azure_event_hubs { * Default value is `false` Adds metadata about the Event Hub, including Event Hub name, consumer_group, -processor_host, partition, offset, sequence, timestamp, and event_size. +processor_host, partition, offset, sequence, timestamp, and event_size. [source,ruby] ---- @@ -392,10 +392,10 @@ azure_event_hubs { * Valid arguments are `beginning`, `end`, `look_back` * Default value is `beginning` -When first reading from an Event Hub, start from this position: +When first reading from an Event Hub, start from this position: -* `beginning` reads all pre-existing events in the Event Hub -* `end` does not read any pre-existing events in the Event Hub +* `beginning` reads all pre-existing events in the Event Hub +* `end` does not read any pre-existing events in the Event Hub * `look_back` reads `end` minus a number of seconds worth of pre-existing events. You control the number of seconds using the `initial_position_look_back` option. @@ -419,7 +419,7 @@ azure_event_hubs { Number of seconds to look back to find the initial position for pre-existing events. This option is used only if `initial_position` is set to `look_back`. If `storage_connection` is set, this configuration applies only the first time Logstash -reads from the Event Hub. +reads from the Event Hub. [source,ruby] ---- @@ -438,7 +438,7 @@ azure_event_hubs { Maximum number of events retrieved and processed together. A checkpoint is created after each batch. Increasing this value may help with performance, but -requires more memory. +requires more memory. [source,ruby] ---- @@ -451,11 +451,11 @@ azure_event_hubs { [id="plugins-{type}s-{plugin}-storage_connection"] ===== `storage_connection` * Value type is <> -* No default value +* No default value Connection string for blob account storage. 
Blob account storage persists the offsets between restarts, and ensures that multiple instances of Logstash -process different partitions. +process different partitions. When this value is set, restarts resume where processing left off. When this value is not set, the `initial_position` value is used on every restart. @@ -476,8 +476,8 @@ azure_event_hubs { * Defaults to the Event Hub name if not defined Name of the storage container used to persist offsets and allow multiple instances of Logstash -to work together. - +to work together. + [source,ruby] ---- azure_event_hubs { @@ -490,7 +490,7 @@ azure_event_hubs { To avoid overwriting offsets, you can use different storage containers. This is particularly important if you are monitoring two Event Hubs with the same name. You can use the advanced configuration model to configure different storage -containers. +containers. [source,ruby] ---- @@ -519,7 +519,7 @@ azure_event_hubs { Total number of threads used to process events. The value you set here applies to all Event Hubs. Even with advanced configuration, this value is a global -setting, and can't be set per event hub. +setting, and can't be set per event hub. [source,ruby] ---- @@ -528,7 +528,7 @@ azure_event_hubs { } ---- -The number of threads should be the number of Event Hubs plus one or more. +The number of threads should be the number of Event Hubs plus one or more. See <> for more information. [id="plugins-{type}s-{plugin}-common-options"] diff --git a/docs/plugins/inputs/beats.asciidoc b/docs/plugins/inputs/beats.asciidoc index 0d4d58532..366cf622f 100644 --- a/docs/plugins/inputs/beats.asciidoc +++ b/docs/plugins/inputs/beats.asciidoc @@ -8,10 +8,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -:version: v7.0.2 -:release_date: 2025-02-12 -:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v7.0.2/CHANGELOG.md -:include_path: ../include +:version: v7.0.3 +:release_date: 2025-09-04 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v7.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/elastic_agent.asciidoc b/docs/plugins/inputs/elastic_agent.asciidoc index bdbfcb6ca..7448d3d6b 100644 --- a/docs/plugins/inputs/elastic_agent.asciidoc +++ b/docs/plugins/inputs/elastic_agent.asciidoc @@ -8,10 +8,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v7.0.2 -:release_date: 2025-02-12 -:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v7.0.2/CHANGELOG.md -:include_path: ../include +:version: v7.0.3 +:release_date: 2025-09-04 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v7.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/elasticsearch.asciidoc b/docs/plugins/inputs/elasticsearch.asciidoc index 52d148ce9..dc0e519a1 100644 --- a/docs/plugins/inputs/elasticsearch.asciidoc +++ b/docs/plugins/inputs/elasticsearch.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -:version: v5.2.0 -:release_date: 2025-06-06 -:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v5.2.0/CHANGELOG.md -:include_path: ../include +:version: v5.2.1 +:release_date: 2025-09-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v5.2.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/http.asciidoc b/docs/plugins/inputs/http.asciidoc index 050541bad..d18fd5410 100644 --- a/docs/plugins/inputs/http.asciidoc +++ b/docs/plugins/inputs/http.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v4.1.2 -:release_date: 2025-02-12 -:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v4.1.2/CHANGELOG.md -:include_path: ../include +:version: v4.1.3 +:release_date: 2025-09-04 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v4.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/jdbc.asciidoc b/docs/plugins/inputs/jdbc.asciidoc index 57b1ba0b9..c08b80c56 100644 --- a/docs/plugins/inputs/jdbc.asciidoc +++ b/docs/plugins/inputs/jdbc.asciidoc @@ -7,10 +7,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -:version: v5.6.0 -:release_date: 2025-05-30 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.0/CHANGELOG.md -:include_path: ../include +:version: v5.6.1 +:release_date: 2025-09-30 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/jms.asciidoc b/docs/plugins/inputs/jms.asciidoc index c9092b004..0e3146fc8 100644 --- a/docs/plugins/inputs/jms.asciidoc +++ b/docs/plugins/inputs/jms.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v3.3.0 -:release_date: 2025-03-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.3.0/CHANGELOG.md -:include_path: ../include +:version: v3.3.1 +:release_date: 2025-09-08 +:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.3.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/kafka.asciidoc b/docs/plugins/inputs/kafka.asciidoc index 24efd7b0d..6ae52e3b5 100644 --- a/docs/plugins/inputs/kafka.asciidoc +++ b/docs/plugins/inputs/kafka.asciidoc @@ -9,10 +9,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -:version: v11.6.3 -:release_date: 2025-06-12 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.3/CHANGELOG.md -:include_path: ../include +:version: v11.6.4 +:release_date: 2025-08-28 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/snmp.asciidoc b/docs/plugins/inputs/snmp.asciidoc index 26e35b5a2..daa165978 100644 --- a/docs/plugins/inputs/snmp.asciidoc +++ b/docs/plugins/inputs/snmp.asciidoc @@ -7,10 +7,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v4.0.6 -:release_date: 2025-01-23 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.0.6/CHANGELOG.md -:include_path: ../include +:version: v4.1.0 +:release_date: 2025-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/inputs/snmptrap.asciidoc b/docs/plugins/inputs/snmptrap.asciidoc index e8a46e660..bc9273a5e 100644 --- a/docs/plugins/inputs/snmptrap.asciidoc +++ b/docs/plugins/inputs/snmptrap.asciidoc @@ -7,10 +7,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -:version: v4.0.6 -:release_date: 2025-01-23 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.0.6/CHANGELOG.md -:include_path: ../include +:version: v4.1.0 +:release_date: 2025-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// @@ -61,6 +61,8 @@ The value is stored in the `@metadata` where it can be used by other plugins in |ECS disabled, v1, v8 |Availability|Description |[@metadata][input][snmptrap][pdu][agent_addr]|`SNMPv1`|Network address of the object generating the trap |[@metadata][input][snmptrap][pdu][community]|`SNMPv1` `SNMPv2c`|SNMP community +|[@metadata][input][snmptrap][pdu][context_engine_id]|`SNMPv3`|SNMP context engine ID +|[@metadata][input][snmptrap][pdu][context_name]|`SNMPv3`|SNMP context name |[@metadata][input][snmptrap][pdu][enterprise]|`SNMPv1`|Type of object generating the trap |[@metadata][input][snmptrap][pdu][error_index]|`SNMPv2c` `SNMPv3`|Provides additional information by identifying which variable binding in the list caused the error @@ -351,8 +353,11 @@ The `priv_protocol` option specifies the SNMPv3 privacy/encryption protocol. [id="plugins-{type}s-{plugin}-security_level"] ===== `security_level` - * Value can be any of: `noAuthNoPriv`, `authNoPriv`, `authPriv` - * There is no default value for this setting + * Value can be any of: + - `noAuthNoPriv`: allows receiving trap messages without authentication or encryption. + - `authNoPriv`: trap messages must be authenticated according to <>/<>/<>. Encrypted messages are allowed but not required. + - `authPriv`: trap messages must be both authenticated according to <>/<>/<> and encrypted according to <>/<>. + * The default value is `noAuthNoPriv`. 
The `security_level` option specifies the SNMPv3 security level between Authentication, No Privacy; Authentication, Privacy; or no Authentication, no Privacy. diff --git a/docs/plugins/inputs/tcp.asciidoc b/docs/plugins/inputs/tcp.asciidoc index 0ac049b10..be2c96092 100644 --- a/docs/plugins/inputs/tcp.asciidoc +++ b/docs/plugins/inputs/tcp.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v7.0.2 -:release_date: 2025-02-12 -:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v7.0.2/CHANGELOG.md -:include_path: ../include +:version: v7.0.3 +:release_date: 2025-09-04 +:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v7.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/integrations/jdbc.asciidoc b/docs/plugins/integrations/jdbc.asciidoc index 8dfd04bd3..4dbbaf5f5 100644 --- a/docs/plugins/integrations/jdbc.asciidoc +++ b/docs/plugins/integrations/jdbc.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v5.6.0 -:release_date: 2025-05-30 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.0/CHANGELOG.md -:include_path: ../include +:version: v5.6.1 +:release_date: 2025-09-30 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.6.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// diff --git a/docs/plugins/integrations/kafka.asciidoc b/docs/plugins/integrations/kafka.asciidoc index c5d84b9f1..d20ac60df 100644 --- a/docs/plugins/integrations/kafka.asciidoc +++ b/docs/plugins/integrations/kafka.asciidoc @@ -7,10 +7,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v11.6.3 -:release_date: 2025-06-12 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.3/CHANGELOG.md -:include_path: ../include +:version: v11.6.4 +:release_date: 2025-08-28 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// diff --git a/docs/plugins/integrations/snmp.asciidoc b/docs/plugins/integrations/snmp.asciidoc index 2dbc6b01b..305984906 100644 --- a/docs/plugins/integrations/snmp.asciidoc +++ b/docs/plugins/integrations/snmp.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v4.0.6 -:release_date: 2025-01-23 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.0.6/CHANGELOG.md -:include_path: ../include +:version: v4.1.0 +:release_date: 2025-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// diff --git a/docs/plugins/outputs/elasticsearch.asciidoc b/docs/plugins/outputs/elasticsearch.asciidoc index 2c3272d73..fdc4b65d4 100644 --- a/docs/plugins/outputs/elasticsearch.asciidoc +++ b/docs/plugins/outputs/elasticsearch.asciidoc @@ -6,10 +6,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v12.0.3 -:release_date: 2025-04-17 -:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v12.0.3/CHANGELOG.md -:include_path: ../include +:version: v12.0.7 +:release_date: 2025-09-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v12.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// @@ -66,9 +66,10 @@ Set the value to port :443 instead. For more info on sending data from {ls} to {es-serverless}, check out the {serverless-docs}/elasticsearch/what-is-elasticsearch-serverless[{es-serverless} docs]. [id="plugins-{type}s-{plugin}-ess"] -==== Hosted {es} Service on Elastic Cloud +==== {ls} to {ech} -{ess-leadin} +You can run Elasticsearch on your own hardware or use Elastic Cloud Hosted, available on AWS, GCP, and Azure. +Try Elastic Cloud Hosted for free: https://cloud.elastic.co/registration. ==== Compatibility with the Elastic Common Schema (ECS) @@ -190,7 +191,7 @@ Example: } -==== Retry Policy +==== Retry policy The retry policy has changed significantly in the 8.1.1 release. This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience @@ -223,7 +224,7 @@ Note that 409 exceptions are no longer retried. Please set a higher `retry_on_co It is more performant for Elasticsearch to retry these exceptions than this plugin. 
[id="plugins-{type}s-{plugin}-dlq-policy"] -==== DLQ Policy +==== DLQ policy Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately mapping errors cannot be handled without human intervention and without looking @@ -231,8 +232,8 @@ at the field that caused the mapping mismatch. If the DLQ is enabled, the original events causing the mapping errors are stored in a file that can be processed at a later time. Often times, the offending field can be removed and re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error -happens, the problem is logged as a warning, and the event is dropped. See -<> for more information about processing events in the DLQ. +happens, the problem is logged as a warning, and the event is dropped. +Check out https://www.elastic.co/guide/en/logstash/current/dead-letter-queues.html[Dead letter queues (DLQ)] for more information about processing events in the DLQ. The list of error codes accepted for DLQ could be customized with <> but should be used only in motivated cases. @@ -303,12 +304,17 @@ single request. ==== DNS Caching This plugin uses the JVM to lookup DNS entries and is subject to the value of -https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl], -a global setting for the JVM. +https://docs.oracle.com/en/java/javase/21/docs/api/java.base/java/net/doc-files/net-properties.html#address-cache-heading[Address Cache settings] +such as `networkaddress.cache.ttl` and `networkaddress.cache.negative.ttl`, global settings for the JVM. As an example, to set your DNS TTL to 1 second you would set the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`. +The default value for `networkaddress.cache.ttl` depends on the JVM implementation, +which is 30 seconds for the JDK bundled with Logstash. +The `networkaddress.cache.negative.ttl` setting, which controls how long Java caches +the result of failed DNS lookups, defaults to 10 seconds. 
+ Keep in mind that a connection with keepalive enabled will not reevaluate its DNS value while the keepalive is in effect. diff --git a/docs/plugins/outputs/kafka.asciidoc b/docs/plugins/outputs/kafka.asciidoc index 73e66749e..a865c12f2 100644 --- a/docs/plugins/outputs/kafka.asciidoc +++ b/docs/plugins/outputs/kafka.asciidoc @@ -9,10 +9,10 @@ /////////////////////////////////////////// START - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -:version: v11.6.3 -:release_date: 2025-06-12 -:changelog_url: https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.3/CHANGELOG.md -:include_path: ../include +:version: v11.6.4 +:release_date: 2025-08-28 +:changelog_url: https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! ///////////////////////////////////////////