From d5a8901231cef2345bf059ddfe89be7ece6a555e Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 3 Mar 2025 17:29:15 -0500 Subject: [PATCH 01/43] some stuff --- .../_snippets/check-es-running.md | 34 +++ .../self-managed/_snippets/connect-clients.md | 43 ++++ .../_snippets/enable-auto-indices.md | 9 + .../deploy/self-managed/_snippets/trial.md | 1 + .../_snippets/zip-windows-start.md | 60 +++++ .../bootstrap-checks-all-permission.md | 9 - .../bootstrap-checks-client-jvm.md | 9 - ...ootstrap-checks-discovery-configuration.md | 17 -- .../bootstrap-checks-early-access.md | 9 - .../bootstrap-checks-file-descriptor.md | 9 - .../bootstrap-checks-heap-size.md | 9 - .../bootstrap-checks-max-file-size.md | 9 - .../bootstrap-checks-max-map-count.md | 11 - .../bootstrap-checks-memory-lock.md | 9 - .../self-managed/bootstrap-checks-onerror.md | 9 - .../bootstrap-checks-serial-collector.md | 9 - .../bootstrap-checks-syscall-filter.md | 9 - .../deploy/self-managed/bootstrap-checks.md | 227 ++++++++++++++++-- .../self-managed/configure-elasticsearch.md | 2 +- .../deploy/self-managed/configure.md | 8 +- .../deploy/self-managed/deploy-cluster.md | 3 +- .../self-managed/executable-jna-tmpdir.md | 3 + .../deploy/self-managed/file-descriptors.md | 13 +- .../important-settings-configuration.md | 2 +- .../important-system-configuration.md | 25 +- ...asticsearch-from-archive-on-linux-macos.md | 15 +- .../install-elasticsearch-with-docker.md | 4 +- .../install-elasticsearch-with-rpm.md | 50 ++-- ...stall-elasticsearch-with-zip-on-windows.md | 177 ++------------ .../self-managed/installing-elasticsearch.md | 84 ++++++- .../self-managed/max-number-of-threads.md | 12 +- .../self-managed/max-number-threads-check.md | 9 - .../max-size-virtual-memory-check.md | 9 - .../self-managed/networkaddress-cache-ttl.md | 6 +- deploy-manage/deploy/self-managed/plugins.md | 2 +- .../self-managed/setting-system-settings.md | 12 +- .../setup-configuration-memory.md | 15 +- 
.../self-managed/system-config-tcpretries.md | 7 +- .../deploy/self-managed/vm-max-map-count.md | 9 +- .../bootstrap-checks-xpack.md | 48 ---- .../install-elasticsearch.md | 88 ------- 41 files changed, 566 insertions(+), 529 deletions(-) create mode 100644 deploy-manage/deploy/self-managed/_snippets/check-es-running.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/connect-clients.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/trial.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-all-permission.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-client-jvm.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-discovery-configuration.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-early-access.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-file-descriptor.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-max-file-size.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-memory-lock.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-onerror.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-serial-collector.md delete mode 100644 deploy-manage/deploy/self-managed/bootstrap-checks-syscall-filter.md delete mode 100644 deploy-manage/deploy/self-managed/max-number-threads-check.md delete mode 100644 deploy-manage/deploy/self-managed/max-size-virtual-memory-check.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md delete mode 100644 
raw-migrated-files/elasticsearch/elasticsearch-reference/install-elasticsearch.md diff --git a/deploy-manage/deploy/self-managed/_snippets/check-es-running.md b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md new file mode 100644 index 0000000000..615a52ed5d --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md @@ -0,0 +1,34 @@ +## Check that {{es}} is running [_check_that_elasticsearch_is_running_2] + +You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: + +```sh +curl --cacert %ES_HOME%\config\certs\http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> +``` + +1. Ensure that you use `https` in your call, or the request will fail.`--cacert` +: Path to the generated `http_ca.crt` certificate for the HTTP layer. + + + +The call returns a response like this: + +```js +{ + "name" : "Cp8oag6", + "cluster_name" : "elasticsearch", + "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", + "version" : { + "number" : "9.0.0-SNAPSHOT", + "build_type" : "tar", + "build_hash" : "f27399d", + "build_flavor" : "default", + "build_date" : "2016-03-30T09:51:41.449Z", + "build_snapshot" : false, + "lucene_version" : "10.0.0", + "minimum_wire_compatibility_version" : "1.2.3", + "minimum_index_compatibility_version" : "1.2.3" + }, + "tagline" : "You Know, for Search" +} +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/connect-clients.md b/deploy-manage/deploy/self-managed/_snippets/connect-clients.md new file mode 100644 index 0000000000..f2d8aa9613 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/connect-clients.md @@ -0,0 +1,43 @@ +## Connect clients to {{es}} [_connect_clients_to_es_2] + +% This file is reused in each of the installation pages. Ensure that any changes +% you make to this file are applicable across all installation environments. 
+ +When you start {{es}} for the first time, TLS is configured automatically for the HTTP layer. A CA certificate is generated and stored on disk at: + +```sh +{{es-conf}}{{slash}}certs{{slash}}http_ca.crt +``` + +The hex-encoded SHA-256 fingerprint of this certificate is also output to the terminal. Any clients that connect to {{es}}, such as the [{{es}} Clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html), {{beats}}, standalone {{agent}}s, and {{ls}} must validate that they trust the certificate that {{es}} uses for HTTPS. {{fleet-server}} and {{fleet}}-managed {{agent}}s are automatically configured to trust the CA certificate. Other clients can establish trust by using either the fingerprint of the CA certificate or the CA certificate itself. + +If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. + + +### Use the CA fingerprint [_use_the_ca_fingerprint_2] + +Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. + +If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. + +```sh +openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt +``` + +The command returns the security certificate, including the fingerprint. The `issuer` should be `Elasticsearch security auto-configuration HTTP CA`. 
+ +```sh +issuer= /CN=Elasticsearch security auto-configuration HTTP CA +SHA256 Fingerprint= +``` + + +### Use the CA certificate [_use_the_ca_certificate_2] + +If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: + +```sh +{{es-conf}}{{slash}}certs{{slash}}http_ca.crt +``` + +Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md b/deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md new file mode 100644 index 0000000000..9e21485203 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md @@ -0,0 +1,9 @@ +Some features automatically create indices within {{es}}. By default, {{es}} is configured to allow automatic index creation, and no additional steps are required. However, if you have disabled automatic index creation in {{es}}, you must configure [`action.auto_create_index`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) in `elasticsearch.yml` to allow features to create the following indices: + +```yaml +action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml* +``` + +::::{important} +If you are using [Logstash](https://www.elastic.co/products/logstash) or [Beats](https://www.elastic.co/products/beats) then you will most likely require additional index names in your `action.auto_create_index` setting, and the exact value will depend on your local configuration. If you are unsure of the correct value for your environment, you may consider setting the value to `*` which will allow automatic creation of all indices. 
+:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/trial.md b/deploy-manage/deploy/self-managed/_snippets/trial.md new file mode 100644 index 0000000000..eecd7bb663 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/trial.md @@ -0,0 +1 @@ +This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md new file mode 100644 index 0000000000..a138ab9582 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md @@ -0,0 +1,60 @@ +## Run {{es}} from the command line [windows-running] + +Run the following command to start {{es}} from the command line: + +```sh +.\bin\elasticsearch.bat +``` + +When starting {{es}} for the first time, security features are enabled and configured by default. The following security configuration occurs automatically: + +* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. +* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. +* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. + +The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. + +We recommend storing the `elastic` password as an environment variable in your shell. Example: + +```sh +$ELASTIC_PASSWORD = "your_password" +``` + +If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. 
+ +By default {{es}} prints its logs to the console (`STDOUT`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. + +To stop {{es}}, press `Ctrl-C`. + + +### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] + +When {{es}} starts for the first time, the security auto-configuration process binds the HTTP layer to `0.0.0.0`, but only binds the transport layer to localhost. This intended behavior ensures that you can start a single-node cluster with security enabled by default without any additional configuration. + +Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. + +Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. 
+ +To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. + +1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. + + ```sh + bin\elasticsearch-create-enrollment-token -s node + ``` + + Copy the enrollment token, which you’ll use to enroll new nodes with your {{es}} cluster. + +2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. + + ```sh + bin\elasticsearch --enrollment-token + ``` + + {{es}} automatically generates certificates and keys in the following directory: + + ```sh + config\certs + ``` + +3. Repeat the previous step for any new nodes that you want to enroll. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-all-permission.md b/deploy-manage/deploy/self-managed/bootstrap-checks-all-permission.md deleted file mode 100644 index 12b8bf3581..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-all-permission.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-all-permission.html ---- - -# All permission check [bootstrap-checks-all-permission] - -The all permission check ensures that the security policy used during bootstrap does not grant the `java.security.AllPermission` to Elasticsearch. Running with the all permission granted is equivalent to disabling the security manager. 
- diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-client-jvm.md b/deploy-manage/deploy/self-managed/bootstrap-checks-client-jvm.md deleted file mode 100644 index 51480cfcea..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-client-jvm.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-client-jvm.html ---- - -# Client JVM check [bootstrap-checks-client-jvm] - -There are two different JVMs provided by OpenJDK-derived JVMs: the client JVM and the server JVM. These JVMs use different compilers for producing executable machine code from Java bytecode. The client JVM is tuned for startup time and memory footprint while the server JVM is tuned for maximizing performance. The difference in performance between the two VMs can be substantial. The client JVM check ensures that Elasticsearch is not running inside the client JVM. To pass the client JVM check, you must start Elasticsearch with the server VM. On modern systems and operating systems, the server VM is the default. - diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-discovery-configuration.md b/deploy-manage/deploy/self-managed/bootstrap-checks-discovery-configuration.md deleted file mode 100644 index bd457015a9..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-discovery-configuration.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-discovery-configuration.html ---- - -# Discovery configuration check [bootstrap-checks-discovery-configuration] - -By default, when Elasticsearch first starts up it will try and discover other nodes running on the same host. If no elected master can be discovered within a few seconds then Elasticsearch will form a cluster that includes any other nodes that were discovered. 
It is useful to be able to form this cluster without any extra configuration in development mode, but this is unsuitable for production because it’s possible to form multiple clusters and lose data as a result. - -This bootstrap check ensures that discovery is not running with the default configuration. It can be satisfied by setting at least one of the following properties: - -* `discovery.seed_hosts` -* `discovery.seed_providers` -* `cluster.initial_master_nodes` - -Note that you must [remove `cluster.initial_master_nodes` from the configuration of every node](important-settings-configuration.md#initial_master_nodes) after the cluster has started for the first time. Instead, configure `discovery.seed_hosts` or `discovery.seed_providers`. If you do not need any discovery configuration, for instance if running a single-node cluster, set `discovery.seed_hosts: []` to disable discovery and satisfy this bootstrap check. - diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-early-access.md b/deploy-manage/deploy/self-managed/bootstrap-checks-early-access.md deleted file mode 100644 index d3b6b21938..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-early-access.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-early-access.html ---- - -# Early-access check [bootstrap-checks-early-access] - -The OpenJDK project provides early-access snapshots of upcoming releases. These releases are not suitable for production. The early-access check detects these early-access snapshots. To pass this check, you must start Elasticsearch on a release build of the JVM. 
- diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-file-descriptor.md b/deploy-manage/deploy/self-managed/bootstrap-checks-file-descriptor.md deleted file mode 100644 index 3f3f309ed1..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-file-descriptor.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-file-descriptor.html ---- - -# File descriptor check [bootstrap-checks-file-descriptor] - -File descriptors are a Unix construct for tracking open "files". In Unix though, [everything is a file](https://en.wikipedia.org/wiki/Everything_is_a_file). For example, "files" could be a physical file, a virtual file (e.g., `/proc/loadavg`), or network sockets. Elasticsearch requires lots of file descriptors (e.g., every shard is composed of multiple segments and other files, plus connections to other nodes, etc.). This bootstrap check is enforced on OS X and Linux. To pass the file descriptor check, you might have to configure [file descriptors](file-descriptors.md). - diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md b/deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md deleted file mode 100644 index 36abba2ff9..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-heap-size.html ---- - -# Heap size check [bootstrap-checks-heap-size] - -By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. If you manually override the default sizing and start the JVM with different initial and max heap sizes, the JVM may pause as it resizes the heap during system usage. 
If you enable [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock), the JVM locks the initial heap size on startup. If the initial heap size is not equal to the maximum heap size, some JVM heap may not be locked after a resize. To avoid these issues, start the JVM with an initial heap size equal to the maximum heap size. - diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-max-file-size.md b/deploy-manage/deploy/self-managed/bootstrap-checks-max-file-size.md deleted file mode 100644 index 02e0a736fd..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-max-file-size.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-max-file-size.html ---- - -# Max file size check [bootstrap-checks-max-file-size] - -The segment files that are the components of individual shards and the translog generations that are components of the translog can get large (exceeding multiple gigabytes). On systems where the max size of files that can be created by the Elasticsearch process is limited, this can lead to failed writes. Therefore, the safest option here is that the max file size is unlimited and that is what the max file size bootstrap check enforces. To pass the max file check, you must configure your system to allow the Elasticsearch process the ability to write files of unlimited size. This can be done via `/etc/security/limits.conf` using the `fsize` setting to `unlimited` (note that you might have to increase the limits for the `root` user too). 
- diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md b/deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md deleted file mode 100644 index 46c4cac7be..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-max-map-count.html ---- - -# Maximum map count check [bootstrap-checks-max-map-count] - -Continuing from the previous [point](max-size-virtual-memory-check.md), to use `mmap` effectively, Elasticsearch also requires the ability to create many memory-mapped areas. The maximum map count check checks that the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only. To pass the maximum map count check, you must configure `vm.max_map_count` via `sysctl` to be at least `262144`. - -Alternatively, the maximum map count check is only needed if you are using `mmapfs` or `hybridfs` as the [store type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/store.md) for your indices. If you [do not allow](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/store.md#allow-mmap) the use of `mmap` then this bootstrap check will not be enforced. - diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-memory-lock.md b/deploy-manage/deploy/self-managed/bootstrap-checks-memory-lock.md deleted file mode 100644 index a44f8830a8..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-memory-lock.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-memory-lock.html ---- - -# Memory lock check [bootstrap-checks-memory-lock] - -When the JVM does a major garbage collection it touches every page of the heap. 
If any of those pages are swapped out to disk they will have to be swapped back in to memory. That causes lots of disk thrashing that Elasticsearch would much rather use to service requests. There are several ways to configure a system to disallow swapping. One way is by requesting the JVM to lock the heap in memory through `mlockall` (Unix) or virtual lock (Windows). This is done via the Elasticsearch setting [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock). However, there are cases where this setting can be passed to Elasticsearch but Elasticsearch is not able to lock the heap (e.g., if the `elasticsearch` user does not have `memlock unlimited`). The memory lock check verifies that **if** the `bootstrap.memory_lock` setting is enabled, that the JVM was successfully able to lock the heap. To pass the memory lock check, you might have to configure [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock). - diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-onerror.md b/deploy-manage/deploy/self-managed/bootstrap-checks-onerror.md deleted file mode 100644 index 07ba5ec076..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-onerror.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-onerror.html ---- - -# OnError and OnOutOfMemoryError checks [bootstrap-checks-onerror] - -The JVM options `OnError` and `OnOutOfMemoryError` enable executing arbitrary commands if the JVM encounters a fatal error (`OnError`) or an `OutOfMemoryError` (`OnOutOfMemoryError`). However, by default, Elasticsearch system call filters (seccomp) are enabled and these filters prevent forking. Thus, using `OnError` or `OnOutOfMemoryError` and system call filters are incompatible. 
The `OnError` and `OnOutOfMemoryError` checks prevent Elasticsearch from starting if either of these JVM options are used and system call filters are enabled. This check is always enforced. To pass this check do not enable `OnError` nor `OnOutOfMemoryError`; instead, upgrade to Java 8u92 and use the JVM flag `ExitOnOutOfMemoryError`. While this does not have the full capabilities of `OnError` nor `OnOutOfMemoryError`, arbitrary forking will not be supported with seccomp enabled. - diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-serial-collector.md b/deploy-manage/deploy/self-managed/bootstrap-checks-serial-collector.md deleted file mode 100644 index 0817073a2e..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-serial-collector.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-serial-collector.html ---- - -# Use serial collector check [bootstrap-checks-serial-collector] - -There are various garbage collectors for the OpenJDK-derived JVMs targeting different workloads. The serial collector in particular is best suited for single logical CPU machines or extremely small heaps, neither of which are suitable for running Elasticsearch. Using the serial collector with Elasticsearch can be devastating for performance. The serial collector check ensures that Elasticsearch is not configured to run with the serial collector. To pass the serial collector check, you must not start Elasticsearch with the serial collector (whether it’s from the defaults for the JVM that you’re using, or you’ve explicitly specified it with `-XX:+UseSerialGC`). Note that the default JVM configuration that ships with Elasticsearch configures Elasticsearch to use the G1GC garbage collector with JDK14 and later versions. For earlier JDK versions, the configuration defaults to the CMS collector. 
- diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-syscall-filter.md b/deploy-manage/deploy/self-managed/bootstrap-checks-syscall-filter.md deleted file mode 100644 index 4d677bf919..0000000000 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-syscall-filter.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-syscall-filter.html ---- - -# System call filter check [bootstrap-checks-syscall-filter] - -Elasticsearch installs system call filters of various flavors depending on the operating system (e.g., seccomp on Linux). These system call filters are installed to prevent the ability to execute system calls related to forking as a defense mechanism against arbitrary code execution attacks on Elasticsearch. The system call filter check ensures that if system call filters are enabled, then they were successfully installed. To pass the system call filter check you must fix any configuration errors on your system that prevented system call filters from installing (check your logs). 
- diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks.md b/deploy-manage/deploy/self-managed/bootstrap-checks.md index ebb73c6466..f085c97c6b 100644 --- a/deploy-manage/deploy/self-managed/bootstrap-checks.md +++ b/deploy-manage/deploy/self-managed/bootstrap-checks.md @@ -2,31 +2,230 @@ mapped_urls: - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-xpack.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-heap-size.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-file-descriptor.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-memory-lock.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/max-number-threads-check.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-max-file-size.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/max-size-virtual-memory-check.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-max-map-count.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-client-jvm.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-serial-collector.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-syscall-filter.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-onerror.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-early-access.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-all-permission.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks-discovery-configuration.html +applies_to: + deployment: + self: --- -# Bootstrap 
Checks +# Bootstrap checks [bootstrap-checks] -% What needs to be done: Refine +{{es}} has bootstrap checks that run at startup to ensure that users have configured all [important settings](../../../deploy-manage/deploy/self-managed/important-settings-configuration.md). -% GitHub issue: https://github.com/elastic/docs-projects/issues/340 +These bootstrap checks inspect a variety of {{es}} and system settings and compare them to values that are safe for the operation of {{es}}. If {{es}} is in development mode, any bootstrap checks that fail appear as warnings in the {{es}} log. If {{es}} is in production mode, any bootstrap checks that fail will cause {{es}} to refuse to start. -% Scope notes: This page shows Development vs Production modes. Very useful and needed during the installation. Should we put the child docs in REFERENCE material -> Elasticsearch Bootstrap Checks reference? +There are some bootstrap checks that are always enforced to prevent {{es}} from running with incompatible settings. These checks are documented individually. -% Use migrated content from existing pages that map to this page: +## Development vs. production mode [dev-vs-prod-mode] -% - [ ] ./raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks.md -% - [ ] ./raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md -% Notes: FYI Colleen McGinnis removed "All children" because the linked page has no children +By default, {{es}} binds to loopback addresses for [HTTP and transport (internal) communication](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md). This is fine for downloading and playing with {{es}} as well as everyday development, but it’s useless for production systems. To join a cluster, an {{es}} node must be reachable via transport communication. 
To join a cluster via a non-loopback address, a node must bind transport to a non-loopback address and not be using [single-node discovery](../../../deploy-manage/deploy/self-managed/bootstrap-checks.md#single-node-discovery). Thus, we consider an {{es}} node to be in development mode if it can not form a cluster with another machine via a non-loopback address, and is otherwise in production mode if it can join a cluster via non-loopback addresses. -% Internal links rely on the following IDs being on this page (e.g. as a heading ID, paragraph ID, etc): +Note that HTTP and transport can be configured independently via [`http.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings) and [`transport.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings). This can be useful for configuring a single node to be reachable via HTTP for testing purposes without triggering production mode. -$$$dev-vs-prod-mode$$$ + +## Single-node discovery [single-node-discovery] + +Some users need to bind the transport to an external interface for testing a remote-cluster configuration. For this situation, we provide the discovery type `single-node`. To enable it, set `discovery.type` to `single-node`. In this situation, a node will elect itself master, and will not join a cluster with any other node. + + +## Forcing the bootstrap checks [_forcing_the_bootstrap_checks] + +If you are running a single node in production, it is possible to evade the bootstrap checks, either by not binding transport to an external interface, or by binding transport to an external interface and setting the discovery type to `single-node`. 
For this situation, you can force execution of the bootstrap checks by setting the system property `es.enforce.bootstrap.checks` to `true` in the [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options). We strongly encourage you to do this if you are in this specific situation. This system property can be used to force execution of the bootstrap checks independent of the node configuration. + +## Checks + +:::{dropdown} Heap size check + +By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. If you manually override the default sizing and start the JVM with different initial and max heap sizes, the JVM may pause as it resizes the heap during system usage. If you enable [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock), the JVM locks the initial heap size on startup. If the initial heap size is not equal to the maximum heap size, some JVM heap may not be locked after a resize. + +To avoid these issues, start the JVM with an initial heap size equal to the maximum heap size. +::: + +:::{dropdown} File descriptor check + +$$$bootstrap-checks-file-descriptor$$$ + +File descriptors are a Unix construct for tracking open "files". In Unix though, [everything is a file](https://en.wikipedia.org/wiki/Everything_is_a_file). For example, "files" could be a physical file, a virtual file (e.g., `/proc/loadavg`), or network sockets. {{es}} requires lots of file descriptors (e.g., every shard is composed of multiple segments and other files, plus connections to other nodes, etc.). This bootstrap check is enforced on OS X and Linux. + +To pass the file descriptor check, you might have to configure [file descriptors](file-descriptors.md). 
+::: + +:::{dropdown} Memory lock check + +$$$bootstrap-checks-memory-lock$$$ + +When the JVM does a major garbage collection it touches every page of the heap. If any of those pages are swapped out to disk they will have to be swapped back in to memory. That causes lots of disk thrashing that {{es}} would much rather use to service requests. There are several ways to configure a system to disallow swapping. One way is by requesting the JVM to lock the heap in memory through `mlockall` (Unix) or virtual lock (Windows). This is done via the {{es}} setting [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock). However, there are cases where this setting can be passed to {{es}} but {{es}} is not able to lock the heap (e.g., if the `elasticsearch` user does not have `memlock unlimited`). The memory lock check verifies that **if** the `bootstrap.memory_lock` setting is enabled, that the JVM was successfully able to lock the heap. + +To pass the memory lock check, you might have to configure [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock). +::: + +:::{dropdown} Maximum number of threads check + +$$$max-number-threads-check$$$ + +{{es}} executes requests by breaking the request down into stages and handing those stages off to different thread pool executors. There are different [thread pool executors](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/thread-pool-settings.md) for a variety of tasks within {{es}}. Thus, {{es}} needs the ability to create a lot of threads. The maximum number of threads check ensures that the {{es}} process has the rights to create enough threads under normal use. This check is enforced only on Linux. + +If you are on Linux, to pass the maximum number of threads check, you must configure your system to allow the {{es}} process the ability to create at least 4096 threads. 
This can be done via `/etc/security/limits.conf` using the `nproc` setting (note that you might have to increase the limits for the `root` user too). +::: + +:::{dropdown} Max file size check + +$$$bootstrap-checks-max-file-size$$$ + +The segment files that are the components of individual shards and the translog generations that are components of the translog can get large (exceeding multiple gigabytes). On systems where the max size of files that can be created by the {{es}} process is limited, this can lead to failed writes. Therefore, the safest option here is that the max file size is unlimited and that is what the max file size bootstrap check enforces. + +To pass the max file check, you must configure your system to allow the {{es}} process the ability to write files of unlimited size. This can be done via `/etc/security/limits.conf` using the `fsize` setting to `unlimited` (note that you might have to increase the limits for the `root` user too). +::: + +:::{dropdown} Maximum size virtual memory check + +$$$max-size-virtual-memory-check$$$ + +{{es}} and Lucene use `mmap` to great effect to map portions of an index into the {{es}} address space. This keeps certain index data off the JVM heap but in memory for blazing fast access. For this to be effective, the {{es}} should have unlimited address space. The maximum size virtual memory check enforces that the {{es}} process has unlimited address space and is enforced only on Linux. + +To pass the maximum size virtual memory check, you must configure your system to allow the {{es}} process the ability to have unlimited address space. This can be done via adding ` - as unlimited` to `/etc/security/limits.conf`. This may require you to increase the limits for the `root` user too. 
+:::
+
+:::{dropdown} Maximum map count check
+
+$$$bootstrap-checks-max-map-count$$$
+
+Continuing from the previous [point](#max-size-virtual-memory-check), to use `mmap` effectively, {{es}} also requires the ability to create many memory-mapped areas. The maximum map count check checks that the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only.
+
+To pass the maximum map count check, you must configure `vm.max_map_count` via `sysctl` to be at least `262144`.
+
+Alternatively, the maximum map count check is only needed if you are using `mmapfs` or `hybridfs` as the [store type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/store.md) for your indices. If you [do not allow](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/store.md#allow-mmap) the use of `mmap` then this bootstrap check will not be enforced.
+:::
+
+:::{dropdown} Client JVM check
+
+$$$bootstrap-checks-client-jvm$$$
+
+There are two different JVMs provided by OpenJDK-derived JVMs: the client JVM and the server JVM. These JVMs use different compilers for producing executable machine code from Java bytecode. The client JVM is tuned for startup time and memory footprint while the server JVM is tuned for maximizing performance. The difference in performance between the two VMs can be substantial. The client JVM check ensures that {{es}} is not running inside the client JVM.
+
+To pass the client JVM check, you must start {{es}} with the server VM. On modern systems and operating systems, the server VM is the default.
+:::
+
+:::{dropdown} Use serial collector check
+
+$$$bootstrap-checks-serial-collector$$$
+
+There are various garbage collectors for the OpenJDK-derived JVMs targeting different workloads. The serial collector in particular is best suited for single logical CPU machines or extremely small heaps, neither of which are suitable for running {{es}}.
Using the serial collector with {{es}} can be devastating for performance. The serial collector check ensures that {{es}} is not configured to run with the serial collector. + +To pass the serial collector check, you must not start {{es}} with the serial collector (whether it’s from the defaults for the JVM that you’re using, or you’ve explicitly specified it with `-XX:+UseSerialGC`). Note that the default JVM configuration that ships with {{es}} configures {{es}} to use the G1GC garbage collector with JDK14 and later versions. For earlier JDK versions, the configuration defaults to the CMS collector. +::: + +:::{dropdown} System call filter check + +$$$bootstrap-checks-syscall-filter$$$ + +{{es}} installs system call filters of various flavors depending on the operating system (e.g., seccomp on Linux). These system call filters are installed to prevent the ability to execute system calls related to forking as a defense mechanism against arbitrary code execution attacks on {{es}}. The system call filter check ensures that if system call filters are enabled, then they were successfully installed. + +To pass the system call filter check you must fix any configuration errors on your system that prevented system call filters from installing (check your logs). +::: + +:::{dropdown} OnError and OnOutOfMemoryError checks + +$$$bootstrap-checks-onerror$$$ + +The JVM options `OnError` and `OnOutOfMemoryError` enable executing arbitrary commands if the JVM encounters a fatal error (`OnError`) or an `OutOfMemoryError` (`OnOutOfMemoryError`). However, by default, {{es}} system call filters (seccomp) are enabled and these filters prevent forking. Thus, using `OnError` or `OnOutOfMemoryError` and system call filters are incompatible. The `OnError` and `OnOutOfMemoryError` checks prevent {{es}} from starting if either of these JVM options are used and system call filters are enabled. This check is always enforced. 
+ +To pass this check, do not enable `OnError` nor `OnOutOfMemoryError`; instead, upgrade to Java 8u92 and use the JVM flag `ExitOnOutOfMemoryError`. While this does not have the full capabilities of `OnError` nor `OnOutOfMemoryError`, arbitrary forking will not be supported with seccomp enabled. +::: + +:::{dropdown} Early-access check + +$$$bootstrap-checks-early-access$$$ + +The OpenJDK project provides early-access snapshots of upcoming releases. These releases are not suitable for production. The early-access check detects these early-access snapshots. + +To pass this check, you must start {{es}} on a release build of the JVM. +::: + +:::{dropdown} All permission check + +$$$bootstrap-checks-all-permission$$$ + +The all permission check ensures that the security policy used during bootstrap does not grant the `java.security.AllPermission` to {{es}}. Running with the all permission granted is equivalent to disabling the security manager. +::: + +:::{dropdown} Discovery configuration check + +$$$bootstrap-checks-discovery-configuration$$$ + +By default, when {{es}} first starts up it will try and discover other nodes running on the same host. If no elected master can be discovered within a few seconds then {{es}} will form a cluster that includes any other nodes that were discovered. It is useful to be able to form this cluster without any extra configuration in development mode, but this is unsuitable for production because it’s possible to form multiple clusters and lose data as a result. + +This bootstrap check ensures that discovery is not running with the default configuration. It can be satisfied by setting at least one of the following properties: + +* `discovery.seed_hosts` +* `discovery.seed_providers` +* `cluster.initial_master_nodes` + +Note that you must [remove `cluster.initial_master_nodes` from the configuration of every node](important-settings-configuration.md#initial_master_nodes) after the cluster has started for the first time. 
Instead, configure `discovery.seed_hosts` or `discovery.seed_providers`. If you do not need any discovery configuration, for instance if running a single-node cluster, set `discovery.seed_hosts: []` to disable discovery and satisfy this bootstrap check. +::: + +:::{dropdown} Encrypt sensitive data check + +$$$bootstrap-checks-xpack-encrypt-sensitive-data$$$ + +If you use {{watcher}} and have chosen to encrypt sensitive data (by setting `xpack.watcher.encrypt_sensitive_data` to `true`), you must also place a key in the secure settings store. + +To pass this bootstrap check, you must set the `xpack.watcher.encryption_key` on each node in the cluster. For more information, see [Encrypting sensitive data in Watcher](../../../explore-analyze/alerts-cases/watcher/encrypting-data.md). +::: + +:::{dropdown} PKI realm check + +$$$bootstrap-checks-xpack-pki-realm$$$ + +If you use {{es}} {{security-features}} and a Public Key Infrastructure (PKI) realm, you must configure Transport Layer Security (TLS) on your cluster and enable client authentication on the network layers (either transport or http). For more information, see [PKI user authentication](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md) and [Set up basic security plus HTTPS](../../../deploy-manage/security/set-up-basic-security-plus-https.md). + +To pass this bootstrap check, if a PKI realm is enabled, you must configure TLS and enable client authentication on at least one network communication layer. +::: + +:::{dropdown} Role mappings check + +$$$bootstrap-checks-xpack-role-mappings$$$ + +If you authenticate users with realms other than `native` or `file` realms, you must create role mappings. These role mappings define which roles are assigned to each user. + +If you use files to manage the role mappings, you must configure a YAML file and copy it to each node in the cluster. By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`. 
Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. For more information, see [Using role mapping files](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-file). + +To pass this bootstrap check, the role mapping files must exist and must be valid. The Distinguished Names (DNs) that are listed in the role mappings files must also be valid. +::: + +::::{dropdown} SSL/TLS check $$$bootstrap-checks-tls$$$ -$$$single-node-discovery$$$ +If you enable {{es}} {{security-features}}, unless you have a trial license, you must configure SSL/TLS for internode-communication. + +:::{note} +Single-node clusters that use a loopback interface do not have this requirement. For more information, see [*Start the {{stack}} with security enabled automatically*](../../../deploy-manage/security/security-certificates-keys.md). +::: + +To pass this bootstrap check, you must [set up SSL/TLS in your cluster](../../../deploy-manage/security/set-up-basic-security.md#encrypt-internode-communication). +:::: + +:::{dropdown} Token SSL check + +$$$bootstrap-checks-xpack-token-ssl$$$ + +If you use {{es}} {{security-features}} and the built-in token service is enabled, you must configure your cluster to use SSL/TLS for the HTTP interface. HTTPS is required in order to use the token service. + +In particular, if `xpack.security.authc.token.enabled` is set to `true` in the `elasticsearch.yml` file, you must also set `xpack.security.http.ssl.enabled` to `true`. For more information about these settings, see [Security settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md) and [Advanced HTTP settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings). 
-**This page is a work in progress.** The documentation team is working to combine content pulled from the following pages: +To pass this bootstrap check, you must enable HTTPS or disable the built-in token service. -* [/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks.md) -* [/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md) \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/configure-elasticsearch.md b/deploy-manage/deploy/self-managed/configure-elasticsearch.md index c6b267cb52..f1f1f73fe0 100644 --- a/deploy-manage/deploy/self-managed/configure-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/configure-elasticsearch.md @@ -3,7 +3,7 @@ mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html --- -# Configure Elasticsearch [settings] +# Configure {{es}} [settings] {{es}} ships with good defaults and requires very little configuration. Most settings can be changed on a running cluster using the [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) API. diff --git a/deploy-manage/deploy/self-managed/configure.md b/deploy-manage/deploy/self-managed/configure.md index 4f4a5b4cb7..3b3dff59bb 100644 --- a/deploy-manage/deploy/self-managed/configure.md +++ b/deploy-manage/deploy/self-managed/configure.md @@ -96,7 +96,7 @@ $$$elasticsearch-hosts$$$ `elasticsearch.hosts:` $$$elasticsearch-publicBaseUrl$$$ `elasticsearch.publicBaseUrl:` -: The URL through which Elasticsearch is publicly accessible, if any. This will be shown to users in Kibana when they need connection details for your Elasticsearch cluster. +: The URL through which {{es}} is publicly accessible, if any. 
This will be shown to users in Kibana when they need connection details for your {{es}} cluster. $$$elasticsearch-pingTimeout$$$ `elasticsearch.pingTimeout` : Time in milliseconds to wait for {{es}} to respond to pings. **Default: the value of the [`elasticsearch.requestTimeout`](#elasticsearch-requestTimeout) setting** @@ -247,10 +247,10 @@ $$$tilemap-url$$$ `map.tilemap.url` ![logo cloud](https://doc-icons.s3.us-east-2 : The URL to the service that {{kib}} uses as the default basemap in [maps](../../../explore-analyze/visualize/maps.md) and [vega maps](../../../explore-analyze/visualize/custom-visualizations-with-vega.md#vega-with-a-map). By default, {{kib}} sets a basemap from the [Elastic Maps Service](../../../explore-analyze/visualize/maps/maps-connect-to-ems.md), but users can point to their own Tile Map Service. For example: `"https://tiles.elastic.co/v2/default/{{z}}/{x}/{{y}}.png?elastic_tile_service_tos=agree&my_app_name=kibana"` `migrations.batchSize` -: Defines the number of documents migrated at a time. The higher the value, the faster the Saved Objects migration process performs at the cost of higher memory consumption. If upgrade migrations results in {{kib}} crashing with an out of memory exception or fails due to an Elasticsearch `circuit_breaking_exception`, use a smaller `batchSize` value to reduce the memory pressure. **Default: `1000`** +: Defines the number of documents migrated at a time. The higher the value, the faster the Saved Objects migration process performs at the cost of higher memory consumption. If upgrade migrations results in {{kib}} crashing with an out of memory exception or fails due to an {{es}} `circuit_breaking_exception`, use a smaller `batchSize` value to reduce the memory pressure. **Default: `1000`** `migrations.maxBatchSizeBytes` -: Defines the maximum payload size for indexing batches of upgraded saved objects to avoid migrations failing due to a 413 Request Entity Too Large response from Elasticsearch. 
This value should be lower than or equal to your Elasticsearch cluster’s `http.max_content_length` configuration option. **Default: `100mb`** +: Defines the maximum payload size for indexing batches of upgraded saved objects to avoid migrations failing due to a 413 Request Entity Too Large response from {{es}}. This value should be lower than or equal to your {{es}} cluster’s `http.max_content_length` configuration option. **Default: `100mb`** `migrations.retryAttempts` : The number of times migrations retry temporary failures, such as a network timeout, 503 status code, or `snapshot_in_progress_exception`. When upgrade migrations frequently fail after exhausting all retry attempts with a message such as `Unable to complete the [...] step after 15 attempts, terminating.`, increase the setting value. **Default: `15`** @@ -386,7 +386,7 @@ $$$server-rate-limiter-enabled$$$ `server.rateLimiter.enabled` $$$server-requestId-allowFromAnyIp$$$ `server.requestId.allowFromAnyIp` -: Sets whether or not the `X-Opaque-Id` header should be trusted from any IP address for identifying requests in logs and forwarded to Elasticsearch. +: Sets whether or not the `X-Opaque-Id` header should be trusted from any IP address for identifying requests in logs and forwarded to {{es}}. `server.requestId.ipAllowlist` : A list of IPv4 and IPv6 address which the `X-Opaque-Id` header should be trusted from. Normally this would be set to the IP addresses of the load balancers or reverse-proxy that end users use to access Kibana. 
If any are set, [`server.requestId.allowFromAnyIp`](#server-requestId-allowFromAnyIp) must also be set to `false.` diff --git a/deploy-manage/deploy/self-managed/deploy-cluster.md b/deploy-manage/deploy/self-managed/deploy-cluster.md index 37e0472937..8c9e3533cb 100644 --- a/deploy-manage/deploy/self-managed/deploy-cluster.md +++ b/deploy-manage/deploy/self-managed/deploy-cluster.md @@ -11,7 +11,7 @@ mapped_urls: % GitHub issue: https://github.com/elastic/docs-projects/issues/340 -% Scope notes: Work with the previous content to explain the different options to install Elasticsearch and Kibana, remove the references to cloud based installation. cover ES + kibana - install of other stack components should be taken care of in that content set hints about install order (First ES then Kib). Add an introduction also to the installation methods (locally, production, multiple OSs). +% Scope notes: Work with the previous content to explain the different options to install {{es}} and Kibana, remove the references to cloud based installation. cover ES + kibana - install of other stack components should be taken care of in that content set hints about install order (First ES then Kib). Add an introduction also to the installation methods (locally, production, multiple OSs). 
% Use migrated content from existing pages that map to this page: @@ -24,6 +24,7 @@ $$$dedicated-host$$$ **This page is a work in progress.** The documentation team is working to combine content pulled from the following pages: +* [/raw-migrated-files/elasticsearch/elasticsearch-reference/elasticsearch-intro-deploy.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/elasticsearch-intro-deploy.md) * [/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md) % Doesn't exist diff --git a/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md b/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md index 40847b987b..a9a4e4325f 100644 --- a/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md +++ b/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md @@ -1,6 +1,9 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/executable-jna-tmpdir.html +applies_to: + deployment: + self: --- # Ensure JNA temporary directory permits executables [executable-jna-tmpdir] diff --git a/deploy-manage/deploy/self-managed/file-descriptors.md b/deploy-manage/deploy/self-managed/file-descriptors.md index 886865e149..059ea30dd5 100644 --- a/deploy-manage/deploy/self-managed/file-descriptors.md +++ b/deploy-manage/deploy/self-managed/file-descriptors.md @@ -1,20 +1,23 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html +applies_to: + deployment: + self: --- -# File Descriptors [file-descriptors] +# Increase the file descriptor limit [file-descriptors] ::::{note} -This is only relevant for Linux and macOS and can be safely ignored if running Elasticsearch on Windows. On Windows that JVM uses an [API](https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx) limited only by available resources. 
+This is only relevant for Linux and macOS and can be safely ignored if running {{es}} on Windows. On Windows, that JVM uses an [API](https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx) limited only by available resources.
 ::::
 
-Elasticsearch uses a lot of file descriptors or file handles. Running out of file descriptors can be disastrous and will most probably lead to data loss. Make sure to increase the limit on the number of open files descriptors for the user running Elasticsearch to 65,535 or higher.
+{{es}} uses a lot of file descriptors or file handles. Running out of file descriptors can be disastrous and will most probably lead to data loss. Make sure to increase the limit on the number of open file descriptors for the user running {{es}} to 65,535 or higher.
 
-For the `.zip` and `.tar.gz` packages, set [`ulimit -n 65535`](setting-system-settings.md#ulimit) as root before starting Elasticsearch, or set `nofile` to `65535` in [`/etc/security/limits.conf`](setting-system-settings.md#limits.conf).
+For the `.zip` and `.tar.gz` packages, set [`ulimit -n 65535`](setting-system-settings.md#ulimit) as root before starting {{es}}, or set `nofile` to `65535` in [`/etc/security/limits.conf`](setting-system-settings.md#limits.conf).
 
-On macOS, you must also pass the JVM option `-XX:-MaxFDLimit` to Elasticsearch in order for it to make use of the higher file descriptor limit.
+On macOS, you must also pass the JVM option `-XX:-MaxFDLimit` to {{es}} in order for it to make use of the higher file descriptor limit.
 
 RPM and Debian packages already default the maximum number of file descriptors to 65535 and do not require further configuration.
diff --git a/deploy-manage/deploy/self-managed/important-settings-configuration.md b/deploy-manage/deploy/self-managed/important-settings-configuration.md index 398a0166a6..2eee82acca 100644 --- a/deploy-manage/deploy/self-managed/important-settings-configuration.md +++ b/deploy-manage/deploy/self-managed/important-settings-configuration.md @@ -60,7 +60,7 @@ Don’t modify anything within the data directory or run processes that might in :::: -Elasticsearch offers a deprecated setting that allows you to specify multiple paths in `path.data`. To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/path.md#multiple-data-paths). +{{es}} offers a deprecated setting that allows you to specify multiple paths in `path.data`. To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/path.md#multiple-data-paths). ## Cluster name setting [_cluster_name_setting] diff --git a/deploy-manage/deploy/self-managed/important-system-configuration.md b/deploy-manage/deploy/self-managed/important-system-configuration.md index a16ed9892e..64486b8798 100644 --- a/deploy-manage/deploy/self-managed/important-system-configuration.md +++ b/deploy-manage/deploy/self-managed/important-system-configuration.md @@ -1,29 +1,32 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config.html +applies_to: + deployment: + self: --- -# Important System configuration [system-config] +# Important system configuration [system-config] Ideally, {{es}} should run alone on a server and use all of the resources available to it. In order to do so, you need to configure your operating system to allow the user running {{es}} to access more resources than allowed by default. 
The following settings **must** be considered before going to production: -* [Configure system settings](setting-system-settings.md) -* [Disable swapping](setup-configuration-memory.md) -* [Increase file descriptors](file-descriptors.md) -* [Ensure sufficient virtual memory](vm-max-map-count.md) -* [Ensure sufficient threads](max-number-of-threads.md) -* [JVM DNS cache settings](networkaddress-cache-ttl.md) -* [Temporary directory not mounted with `noexec`](executable-jna-tmpdir.md) -* [TCP retransmission timeout](system-config-tcpretries.md) +* [](setting-system-settings.md) +* [](setup-configuration-memory.md) +* [](vm-max-map-count.md) +* [](max-number-of-threads.md) +* [](networkaddress-cache-ttl.md) +* [](file-descriptors.md) (Linux and MacOS only) +* [](executable-jna-tmpdir.md) (Linux only) +* [](system-config-tcpretries.md) (Linux only) -## Development mode vs production mode [dev-vs-prod] +## Development mode vs. production mode [dev-vs-prod] By default, {{es}} assumes that you are working in development mode. If any of the above settings are not configured correctly, a warning will be written to the log file, but you will be able to start and run your {{es}} node. -As soon as you configure a network setting like `network.host`, {{es}} assumes that you are moving to production and will upgrade the above warnings to exceptions. These exceptions will prevent your {{es}} node from starting. This is an important safety measure to ensure that you will not lose data because of a malconfigured server. +As soon as you configure a network setting like `network.host`, {{es}} assumes that you are moving to production and will upgrade the above warnings to exceptions. These exceptions will prevent your {{es}} node from starting. This is an important safety measure to ensure that you will not lose data because of a misconfigured server. 
diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index 55c1c4f0f3..f229181b9a 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -3,11 +3,12 @@ mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/targz.html --- -# Install Elasticsearch from archive on Linux or MacOS [targz] +# Install {{es}} from archive on Linux or MacOS [targz] {{es}} is available as a `.tar.gz` archive for Linux and MacOS. -This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. +:::{include} _snippets/trial.md +::: The latest stable version of {{es}} can be found on the [Download {{es}}](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). @@ -157,7 +158,7 @@ To enroll new nodes in your cluster, create an enrollment token with the `elasti 3. Repeat the previous step for any new nodes that you want to enroll. 
-## Check that Elasticsearch is running [_check_that_elasticsearch_is_running] +## Check that {{es}} is running [_check_that_elasticsearch_is_running] You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: @@ -197,7 +198,7 @@ Log printing to `stdout` can be disabled using the `-q` or `--quiet` option on t ## Run as a daemon [setup-installation-daemon] -To run Elasticsearch as a daemon, specify `-d` on the command line, and record the process ID in a file using the `-p` option: +To run {{es}} as a daemon, specify `-d` on the command line, and record the process ID in a file using the `-p` option: ```sh ./bin/elasticsearch -d -p pid @@ -207,7 +208,7 @@ If you have password-protected the {{es}} keystore, you will be prompted to ente Log messages can be found in the `$ES_HOME/logs/` directory. -To shut down Elasticsearch, kill the process ID recorded in the `pid` file: +To shut down {{es}}, kill the process ID recorded in the `pid` file: ```sh pkill -F pid @@ -327,6 +328,6 @@ bin/elasticsearch-keystore show xpack.security.transport.ssl.keystore.secure_pas You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: -* Learn how to [configure Elasticsearch](configure-elasticsearch.md). -* Configure [important Elasticsearch settings](important-settings-configuration.md). +* Learn how to [configure {{es}}](configure-elasticsearch.md). +* Configure [important {{es}} settings](important-settings-configuration.md). * Configure [important system settings](important-system-configuration.md). 
diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 4d53003d00..7a44020e47 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -3,7 +3,7 @@ mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html --- -# Install Elasticsearch with Docker [docker] +# Install {{es}} with Docker [docker] Docker images for {{es}} are available from the Elastic Docker registry. A list of all published Docker images and tags is available at [www.docker.elastic.co](https://www.docker.elastic.co). The source code is in [GitHub](https://github.com/elastic/elasticsearch/blob/master/distribution/docker). @@ -261,7 +261,7 @@ Use Docker Compose to start a three-node {{es}} cluster with {{kib}}. Docker Com ```txt ... - # Port to expose Elasticsearch HTTP API to the host + # Port to expose {{es}} HTTP API to the host #ES_PORT=9200 ES_PORT=127.0.0.1:9200 ... diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 47010a4254..f55fc88d1d 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -3,9 +3,9 @@ mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/rpm.html --- -# Install Elasticsearch with RPM [rpm] +# Install {{es}} with RPM [rpm] -The RPM for Elasticsearch can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install Elasticsearch on any RPM-based system such as OpenSuSE, SLES, Centos, Red Hat, and Oracle Enterprise. +The RPM for {{es}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). 
It can be used to install {{es}} on any RPM-based system such as OpenSuSE, SLES, Centos, Red Hat, and Oracle Enterprise. ::::{note} RPM install is not supported on distributions with old versions of RPM, such as SLES 11 and CentOS 5. Please see [Install {{es}} from archive on Linux or MacOS](install-elasticsearch-from-archive-on-linux-macos.md) instead. @@ -14,10 +14,10 @@ RPM install is not supported on distributions with old versions of RPM, such as This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. -The latest stable version of Elasticsearch can be found on the [Download Elasticsearch](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +The latest stable version of {{es}} can be found on the [Download {{es}}](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). ::::{note} -Elasticsearch includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) +{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) :::: @@ -26,9 +26,9 @@ For a step-by-step example of setting up the {{stack}} on your own premises, try :::: -## Import the Elasticsearch GPG Key [rpm-key] +## Import the {{es}} GPG Key [rpm-key] -We sign all of our packages with the Elasticsearch Signing Key (PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), available from [https://pgp.mit.edu](https://pgp.mit.edu)) with fingerprint: +We sign all of our packages with the {{es}} Signing Key (PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), available from [https://pgp.mit.edu](https://pgp.mit.edu)) with fingerprint: ``` 4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4 @@ -43,7 +43,7 @@ rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch ## Installing from the RPM repository [rpm-repo] ::::{warning} -Version 9.0.0-beta1 of Elasticsearch has not yet been released. +Version 9.0.0-beta1 of {{es}} has not yet been released. :::: @@ -53,11 +53,11 @@ Create a file called `elasticsearch.repo` in the `/etc/yum.repos.d/` directory f ## Download and install the RPM manually [install-rpm] ::::{warning} -Version 9.0.0-beta1 of Elasticsearch has not yet been released. The RPM might not be available. +Version 9.0.0-beta1 of {{es}} has not yet been released. The RPM might not be available. 
:::: -The RPM for Elasticsearch v9.0.0-beta1 can be downloaded from the website and installed as follows: +The RPM for {{es}} v9.0.0-beta1 can be downloaded from the website and installed as follows: ```sh wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-x86_64.rpm @@ -128,25 +128,25 @@ If you are using [Logstash](https://www.elastic.co/products/logstash) or [Beats] -## Running Elasticsearch with `systemd` [rpm-running-systemd] +## Running {{es}} with `systemd` [rpm-running-systemd] -To configure Elasticsearch to start automatically when the system boots up, run the following commands: +To configure {{es}} to start automatically when the system boots up, run the following commands: ```sh sudo /bin/systemctl daemon-reload sudo /bin/systemctl enable elasticsearch.service ``` -Elasticsearch can be started and stopped as follows: +{{es}} can be started and stopped as follows: ```sh sudo systemctl start elasticsearch.service sudo systemctl stop elasticsearch.service ``` -These commands provide no feedback as to whether Elasticsearch was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. +These commands provide no feedback as to whether {{es}} was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. -If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once Elasticsearch is up and running. +If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once {{es}} is up and running. 
 ```sh
 echo "keystore_password" > /path/to/my_pwd_file.tmp
@@ -155,7 +155,7 @@ sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.
 sudo systemctl start elasticsearch.service
 ```
 
-By default the Elasticsearch service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file.
+By default the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file.
 
 When `systemd` logging is enabled, the logging information are available using the `journalctl` commands:
@@ -195,11 +195,11 @@ Versions of `systemd` prior to 238 do not support the timeout extension mechanis
 However the `systemd` logs will report that the startup timed out:
 
 ```text
-Jan 31 01:22:30 debian systemd[1]: Starting Elasticsearch...
+Jan 31 01:22:30 debian systemd[1]: Starting {{es}}...
 Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating.
 Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM
 Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'.
-Jan 31 01:37:15 debian systemd[1]: Failed to start Elasticsearch.
+Jan 31 01:37:15 debian systemd[1]: Failed to start {{es}}.
 ```
 
 To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter.
@@ -208,7 +208,7 @@ To avoid this, upgrade your `systemd` to at least version 238.
You can also temp -## Check that Elasticsearch is running [rpm-check-running] +## Check that {{es}} is running [rpm-check-running] You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: @@ -244,13 +244,13 @@ The call returns a response like this: ``` -## Configuring Elasticsearch [rpm-configuring] +## Configuring {{es}} [rpm-configuring] The `/etc/elasticsearch` directory contains the default runtime configuration for {{es}}. The ownership of this directory and all contained files are set to `root:elasticsearch` on package installations. The `setgid` flag applies group permissions on the `/etc/elasticsearch` directory to ensure that {{es}} can read any contained files and subdirectories. All files and subdirectories inherit the `root:elasticsearch` ownership. Running commands from this directory or any subdirectories, such as the [elasticsearch-keystore tool](../../security/secure-settings.md), requires `root:elasticsearch` permissions. -Elasticsearch loads its configuration from the `/etc/elasticsearch/elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). +{{es}} loads its configuration from the `/etc/elasticsearch/elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). The RPM also has a system configuration file (`/etc/sysconfig/elasticsearch`), which allows you to set the following parameters: @@ -264,7 +264,7 @@ The RPM also has a system configuration file (`/etc/sysconfig/elasticsearch`), w : Any additional JVM system properties you may want to apply. `RESTART_ON_UPGRADE` -: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your Elasticsearch instance after installing a package manually. 
The reason for this is to ensure, that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster.
+: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your {{es}} instance after installing a package manually. The reason for this is to ensure that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster.
 
 ::::{note}
 Distributions that use `systemd` require that system resource limits be configured via `systemd` rather than via the `/etc/sysconfig/elasticsearch` file. See [Systemd configuration](setting-system-settings.md#systemd) for more information.
@@ -320,13 +320,13 @@ The RPM places config files, logs, and the data directory in the appropriate loc
 
 | Type | Description | Default Location | Setting |
 | --- | --- | --- | --- |
-| home | Elasticsearch home directory or `$ES_HOME` | `/usr/share/elasticsearch` | |
+| home | {{es}} home directory or `$ES_HOME` | `/usr/share/elasticsearch` | |
 | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `/usr/share/elasticsearch/bin` | |
 | conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | `[ES_PATH_CONF](configure-elasticsearch.md#config-files-location)` |
 | conf | Environment variables including heap size, file descriptors. | `/etc/sysconfig/elasticsearch` | |
 | conf | Generated TLS keys and certificates for the transport and http layer. | `/etc/elasticsearch/certs` | |
 | data | The location of the data files of each index / shard allocated on the node. | `/var/lib/elasticsearch` | `path.data` |
-| jdk | The bundled Java Development Kit used to run Elasticsearch. Can be overridden by setting the `ES_JAVA_HOME` environment variable in `/etc/sysconfig/elasticsearch`.
| `/usr/share/elasticsearch/jdk` | | +| jdk | The bundled Java Development Kit used to run {{es}}. Can be overridden by setting the `ES_JAVA_HOME` environment variable in `/etc/sysconfig/elasticsearch`. | `/usr/share/elasticsearch/jdk` | | | logs | Log files location. | `/var/log/elasticsearch` | `path.logs` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/elasticsearch/plugins` | | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | @@ -364,6 +364,6 @@ bin/elasticsearch-keystore show xpack.security.transport.ssl.keystore.secure_pas You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: -* Learn how to [configure Elasticsearch](configure-elasticsearch.md). -* Configure [important Elasticsearch settings](important-settings-configuration.md). +* Learn how to [configure {{es}}](configure-elasticsearch.md). +* Configure [important {{es}} settings](important-settings-configuration.md). * Configure [important system settings](important-system-configuration.md). 
diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 4cdbe53c53..74b26b6cb1 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -1,9 +1,14 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/zip-windows.html +sub: + es-conf: "%ES_HOME%\\config" + slash: "\\" --- -# Install Elasticsearch with .zip on Windows [zip-windows] +# Install {{es}} with .zip on Windows [zip-windows] + +{{es-conf}} {{es}} can be installed on Windows using the Windows `.zip` archive. This comes with a `elasticsearch-service.bat` command which will setup {{es}} to run as a service. @@ -23,10 +28,6 @@ The latest stable version of {{es}} can be found on the [Download {{es}}](https: ## Download and install the `.zip` package [install-windows] -::::{warning} -Version 9.0.0-beta1 of {{es}} has not yet been released. The archive might not be available. -:::: - Download the `.zip` archive for {{es}} 9.0.0-beta1 from: [https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-windows-x86_64.zip](https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-windows-x86_64.zip) @@ -39,80 +40,9 @@ cd C:\Program Files\elasticsearch-9.0.0-beta1 ## Enable automatic creation of system indices [windows-enable-indices] -Some commercial features automatically create indices within {{es}}. By default, {{es}} is configured to allow automatic index creation, and no additional steps are required. 
However, if you have disabled automatic index creation in {{es}}, you must configure [`action.auto_create_index`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) in `elasticsearch.yml` to allow the commercial features to create the following indices: - -```yaml -action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml* -``` - -::::{important} -If you are using [Logstash](https://www.elastic.co/products/logstash) or [Beats](https://www.elastic.co/products/beats) then you will most likely require additional index names in your `action.auto_create_index` setting, and the exact value will depend on your local configuration. If you are unsure of the correct value for your environment, you may consider setting the value to `*` which will allow automatic creation of all indices. - -:::: - - - -## Run {{es}} from the command line [windows-running] - -Run the following command to start {{es}} from the command line: - -```sh -.\bin\elasticsearch.bat -``` - -When starting {{es}} for the first time, security features are enabled and configured by default. The following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. -* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. - -The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. - -We recommend storing the `elastic` password as an environment variable in your shell. Example: - -```sh -$ELASTIC_PASSWORD = "your_password" -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. 
- -By default {{es}} prints its logs to the console (`STDOUT`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. - -To stop {{es}}, press `Ctrl-C`. - - -### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] - -When {{es}} starts for the first time, the security auto-configuration process binds the HTTP layer to `0.0.0.0`, but only binds the transport layer to localhost. This intended behavior ensures that you can start a single-node cluster with security enabled by default without any additional configuration. - -Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. - -Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. 
- -To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. - -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. - - ```sh - bin\elasticsearch-create-enrollment-token -s node - ``` - - Copy the enrollment token, which you’ll use to enroll new nodes with your {{es}} cluster. - -2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. - - ```sh - bin\elasticsearch --enrollment-token - ``` - - {{es}} automatically generates certificates and keys in the following directory: - - ```sh - config\certs - ``` - -3. Repeat the previous step for any new nodes that you want to enroll. +:::{include} _snippets/enable-auto-indices.md +:::{include} _snippets/zip-windows-start.md ## Configure {{es}} on the command line [windows-configuring] @@ -133,43 +63,7 @@ Values that contain spaces must be surrounded with quotes. For instance `-Epath. Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings such as `node.name` could be specified on the command line. :::: - - -## Check that Elasticsearch is running [_check_that_elasticsearch_is_running_2] - -You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: - -```sh -curl --cacert %ES_HOME%\config\certs\http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> -``` - -1. 
Ensure that you use `https` in your call, or the request will fail.`--cacert` -: Path to the generated `http_ca.crt` certificate for the HTTP layer. - - - -The call returns a response like this: - -```js -{ - "name" : "Cp8oag6", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", - "version" : { - "number" : "9.0.0-SNAPSHOT", - "build_type" : "tar", - "build_hash" : "f27399d", - "build_flavor" : "default", - "build_date" : "2016-03-30T09:51:41.449Z", - "build_snapshot" : false, - "lucene_version" : "10.0.0", - "minimum_wire_compatibility_version" : "1.2.3", - "minimum_index_compatibility_version" : "1.2.3" - }, - "tagline" : "You Know, for Search" -} -``` - +:::{include} _snippets/check-es-running.md ## Install and run {{es}} as a service on Windows [windows-service] @@ -255,10 +149,10 @@ The {{es}} service can be configured prior to installation by setting the follow : The password for the user specified in `%SERVICE_USERNAME%`. `SERVICE_DISPLAY_NAME` -: The name of the service. Defaults to `{{es}} %SERVICE_ID%`. +: The name of the service. Defaults to `Elasticsearch %SERVICE_ID%`. `SERVICE_DESCRIPTION` -: The description of the service. Defaults to `{{es}} Windows Service - https://elastic.co`. +: The description of the service. Defaults to `Elasticsearch Windows Service - https://elastic.co`. `ES_JAVA_HOME` : The installation directory of the desired JVM to run the service under. @@ -301,49 +195,8 @@ Using the Manager GUI Most changes (like JVM settings) made through the manager GUI will require a restart of the service to take affect. - - -## Connect clients to {{es}} [_connect_clients_to_es_2] - -When you start {{es}} for the first time, TLS is configured automatically for the HTTP layer. A CA certificate is generated and stored on disk at: - -```sh -%ES_HOME%\config\certs\http_ca.crt -``` - -The hex-encoded SHA-256 fingerprint of this certificate is also output to the terminal. 
Any clients that connect to {{es}}, such as the [{{es}} Clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html), {{beats}}, standalone {{agent}}s, and {{ls}} must validate that they trust the certificate that {{es}} uses for HTTPS. {{fleet-server}} and {{fleet}}-managed {{agent}}s are automatically configured to trust the CA certificate. Other clients can establish trust by using either the fingerprint of the CA certificate or the CA certificate itself. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. - - -#### Use the CA fingerprint [_use_the_ca_fingerprint_2] - -Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. - -```sh -openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt -``` - -The command returns the security certificate, including the fingerprint. The `issuer` should be `Elasticsearch security auto-configuration HTTP CA`. - -```sh -issuer= /CN=Elasticsearch security auto-configuration HTTP CA -SHA256 Fingerprint= -``` - - -#### Use the CA certificate [_use_the_ca_certificate_2] - -If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: - -```sh -%ES_HOME%\config\certs\http_ca.crt -``` - -Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. 
- +:::{include} _snippets/connect-clients.md +::: ## Directory layout of `.zip` archive [windows-layout] @@ -367,6 +220,6 @@ This is very convenient because you don’t have to create any directories to st You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: -* Learn how to [configure Elasticsearch](configure-elasticsearch.md). -* Configure [important Elasticsearch settings](important-settings-configuration.md). +* Learn how to [configure {{es}}](configure-elasticsearch.md). +* Configure [important {{es}} settings](important-settings-configuration.md). * Configure [important system settings](important-system-configuration.md). diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index fd08d3bf1d..b0ac787463 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -5,7 +5,7 @@ mapped_urls: - https://www.elastic.co/guide/en/elastic-stack/current/installing-stack-demo-self.html --- -# Installing Elasticsearch +# Install {{es}} [install-elasticsearch] % What needs to be done: Refine @@ -59,6 +59,84 @@ $$$install-stack-self-view-data$$$ **This page is a work in progress.** The documentation team is working to combine content pulled from the following pages: -* [/raw-migrated-files/elasticsearch/elasticsearch-reference/install-elasticsearch.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/install-elasticsearch.md) * [/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md) -* [/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md](/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md) \ No newline at end of file +* 
[/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md](/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md) + + + +If you want to install and manage {{es}} yourself, you can: + +* Run {{es}} using a [Linux, MacOS, or Windows install package](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). +* Run {{es}} in a [Docker container](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). +* Set up and manage {{es}}, {{kib}}, {{agent}}, and the rest of the Elastic Stack on Kubernetes with [{{eck}}](https://www.elastic.co/guide/en/cloud-on-k8s/current). + +::::{tip} +To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see [Run Elasticsearch locally](../../../solutions/search/get-started.md). This setup is not suitable for production use. +:::: + + + +## Elasticsearch install packages [elasticsearch-install-packages] + +Elasticsearch is provided in the following package formats: + +Linux and MacOS `tar.gz` archives +: The `tar.gz` archives are available for installation on any Linux distribution and MacOS. + + [Install {{es}} from archive on Linux or MacOS](../../../deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md) + + +Windows `.zip` archive +: The `zip` archive is suitable for installation on Windows. + + [Install {{es}} with `.zip` on Windows](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md) + + +`deb` +: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elasticsearch website or from our Debian repository. 
+ + [Install Elasticsearch with Debian Package](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md) + + +`rpm` +: The `rpm` package is suitable for installation on Red Hat, Centos, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elasticsearch website or from our RPM repository. + + [Install Elasticsearch with RPM](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md) + + +::::{tip} +For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md). +:::: + + + +## Elasticsearch container images [elasticsearch-docker-images] + +You can also run {{es}} inside a container image. + +`docker` +: Docker container images may be downloaded from the Elastic Docker Registry. + + [Install {{es}} with Docker](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md) + + + +## Java (JVM) Version [jvm-version] + +{{es}} is built using Java, and includes a bundled version of [OpenJDK](https://openjdk.java.net) within each distribution. We strongly recommend using the bundled JVM in all installations of {{es}}. + +The bundled JVM is treated the same as any other dependency of {{es}} in terms of support and maintenance. This means that Elastic takes responsibility for keeping it up to date, and reacts to security issues and bug reports as needed to address vulnerabilities and other bugs in {{es}}. Elastic’s support of the bundled JVM is subject to Elastic’s [support policy](https://www.elastic.co/support_policy) and [end-of-life schedule](https://www.elastic.co/support/eol) and is independent of the support policy and end-of-life schedule offered by the original supplier of the JVM. Elastic does not support using the bundled JVM for purposes other than running {{es}}. 
+ +::::{tip} +{{es}} uses only a subset of the features offered by the JVM. Bugs and security issues in the bundled JVM often relate to features that {{es}} does not use. Such issues do not apply to {{es}}. Elastic analyzes reports of security vulnerabilities in all its dependencies, including in the bundled JVM, and will issue an [Elastic Security Advisory](https://www.elastic.co/community/security) if such an advisory is needed. +:::: + + +If you decide to run {{es}} using a version of Java that is different from the bundled one, prefer to use the latest release of a [LTS version of Java](https://www.oracle.com/technetwork/java/eol-135779.html) which is [listed in the support matrix](https://elastic.co/support/matrix). Although such a configuration is supported, if you encounter a security issue or other bug in your chosen JVM then Elastic may not be able to help unless the issue is also present in the bundled JVM. Instead, you must seek assistance directly from the supplier of your chosen JVM. You must also take responsibility for reacting to security and bug announcements from the supplier of your chosen JVM. {{es}} may not perform optimally if using a JVM other than the bundled one. {{es}} is closely coupled to certain OpenJDK-specific features, so it may not work correctly with JVMs that are not OpenJDK. {{es}} will refuse to start if you attempt to use a known-bad JVM version. + +To use your own version of Java, set the `ES_JAVA_HOME` environment variable to the path to your own JVM installation. The bundled JVM is located within the `jdk` subdirectory of the {{es}} home directory. You may remove this directory if using your own JVM. + + +## JVM and Java agents [jvm-agents] + +Don’t use third-party Java agents that attach to the JVM. These agents can reduce {{es}} performance, including freezing or crashing nodes.
diff --git a/deploy-manage/deploy/self-managed/max-number-of-threads.md b/deploy-manage/deploy/self-managed/max-number-of-threads.md index 2054bbbe0d..c7b950729a 100644 --- a/deploy-manage/deploy/self-managed/max-number-of-threads.md +++ b/deploy-manage/deploy/self-managed/max-number-of-threads.md @@ -1,13 +1,17 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/max-number-of-threads.html +navigation_title: Increase max number of threads +applies_to: + deployment: + self: --- -# Number of threads [max-number-of-threads] +# Increase the maximum number of threads [max-number-of-threads] -Elasticsearch uses a number of thread pools for different types of operations. It is important that it is able to create new threads whenever needed. Make sure that the number of threads that the Elasticsearch user can create is at least 4096. +{{es}} uses a number of thread pools for different types of operations. It is important that it is able to create new threads whenever needed. Make sure that the number of threads that the {{es}} user can create is at least 4096. -This can be done by setting [`ulimit -u 4096`](setting-system-settings.md#ulimit) as root before starting Elasticsearch, or by setting `nproc` to `4096` in [`/etc/security/limits.conf`](setting-system-settings.md#limits.conf). +This can be done by setting [`ulimit -u 4096`](setting-system-settings.md#ulimit) as root before starting {{es}}, or by setting `nproc` to `4096` in [`/etc/security/limits.conf`](setting-system-settings.md#limits.conf). -The package distributions when run as services under `systemd` will configure the number of threads for the Elasticsearch process automatically. No additional configuration is required. +The package distributions when run as services under `systemd` will configure the number of threads for the {{es}} process automatically. No additional configuration is required. 
diff --git a/deploy-manage/deploy/self-managed/max-number-threads-check.md b/deploy-manage/deploy/self-managed/max-number-threads-check.md deleted file mode 100644 index 1212518221..0000000000 --- a/deploy-manage/deploy/self-managed/max-number-threads-check.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/max-number-threads-check.html ---- - -# Maximum number of threads check [max-number-threads-check] - -Elasticsearch executes requests by breaking the request down into stages and handing those stages off to different thread pool executors. There are different [thread pool executors](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/thread-pool-settings.md) for a variety of tasks within Elasticsearch. Thus, Elasticsearch needs the ability to create a lot of threads. The maximum number of threads check ensures that the Elasticsearch process has the rights to create enough threads under normal use. This check is enforced only on Linux. If you are on Linux, to pass the maximum number of threads check, you must configure your system to allow the Elasticsearch process the ability to create at least 4096 threads. This can be done via `/etc/security/limits.conf` using the `nproc` setting (note that you might have to increase the limits for the `root` user too). 
- diff --git a/deploy-manage/deploy/self-managed/max-size-virtual-memory-check.md b/deploy-manage/deploy/self-managed/max-size-virtual-memory-check.md deleted file mode 100644 index ef6d33d1a7..0000000000 --- a/deploy-manage/deploy/self-managed/max-size-virtual-memory-check.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/max-size-virtual-memory-check.html ---- - -# Maximum size virtual memory check [max-size-virtual-memory-check] - -Elasticsearch and Lucene use `mmap` to great effect to map portions of an index into the Elasticsearch address space. This keeps certain index data off the JVM heap but in memory for blazing fast access. For this to be effective, the Elasticsearch should have unlimited address space. The maximum size virtual memory check enforces that the Elasticsearch process has unlimited address space and is enforced only on Linux. To pass the maximum size virtual memory check, you must configure your system to allow the Elasticsearch process the ability to have unlimited address space. This can be done via adding ` - as unlimited` to `/etc/security/limits.conf`. This may require you to increase the limits for the `root` user too. - diff --git a/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md b/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md index c554d7a342..66a68bbeb4 100644 --- a/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md +++ b/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md @@ -1,9 +1,11 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/networkaddress-cache-ttl.html +applies_to: + deployment: + self: --- # DNS cache settings [networkaddress-cache-ttl] -Elasticsearch runs with a security manager in place. With a security manager in place, the JVM defaults to caching positive hostname resolutions indefinitely and defaults to caching negative hostname resolutions for ten seconds. 
Elasticsearch overrides this behavior with default values to cache positive lookups for sixty seconds, and to cache negative lookups for ten seconds. These values should be suitable for most environments, including environments where DNS resolutions vary with time. If not, you can edit the values `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl` in the [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options). Note that the values [`networkaddress.cache.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.md) and [`networkaddress.cache.negative.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.md) in the [Java security policy](https://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.md) are ignored by Elasticsearch unless you remove the settings for `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl`. - +{{es}} runs with a security manager in place. With a security manager in place, the JVM defaults to caching positive hostname resolutions indefinitely and defaults to caching negative hostname resolutions for ten seconds. {{es}} overrides this behavior with default values to cache positive lookups for sixty seconds, and to cache negative lookups for ten seconds. These values should be suitable for most environments, including environments where DNS resolutions vary with time. If not, you can edit the values `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl` in the [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options). 
Note that the values [`networkaddress.cache.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html) and [`networkaddress.cache.negative.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html) in the [Java security policy](https://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html) are ignored by {{es}} unless you remove the settings for `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl`. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/plugins.md b/deploy-manage/deploy/self-managed/plugins.md index 1bb9edc9bd..09dde45fe6 100644 --- a/deploy-manage/deploy/self-managed/plugins.md +++ b/deploy-manage/deploy/self-managed/plugins.md @@ -5,7 +5,7 @@ mapped_pages: # Plugins [modules-plugins] -Plugins are a way to enhance the basic Elasticsearch functionality in a custom manner. They range from adding custom mapping types, custom analyzers (in a more built in fashion), custom script engines, custom discovery and more. +Plugins are a way to enhance the basic {{es}} functionality in a custom manner. They range from adding custom mapping types, custom analyzers (in a more built in fashion), custom script engines, custom discovery and more. For information about selecting and installing plugins, see [{{es}} Plugins and Integrations](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/index.md).
diff --git a/deploy-manage/deploy/self-managed/setting-system-settings.md b/deploy-manage/deploy/self-managed/setting-system-settings.md index 3679079cf6..257e56915e 100644 --- a/deploy-manage/deploy/self-managed/setting-system-settings.md +++ b/deploy-manage/deploy/self-managed/setting-system-settings.md @@ -1,11 +1,14 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-system-settings.html +applies_to: + deployment: + self: --- -# Configuring system settings [setting-system-settings] +# Configure system settings [setting-system-settings] -Where to configure systems settings depends on which package you have used to install Elasticsearch, and which operating system you are using. +Where to configure system settings depends on which package you have used to install {{es}}, and which operating system you are using. When using the `.zip` or `.tar.gz` packages, system settings can be configured: @@ -14,9 +17,10 @@ When using the RPM or Debian packages, most system settings are set in the [system configuration file](#sysconfig). However, systems which use systemd require that system limits are specified in a [systemd configuration file](#systemd). + ## `ulimit` [ulimit] -On Linux systems, `ulimit` can be used to change resource limits on a temporary basis. Limits usually need to be set as `root` before switching to the user that will run Elasticsearch. For example, to set the number of open file handles (`ulimit -n`) to 65,535, you can do the following: +On Linux systems, `ulimit` can be used to change resource limits on a temporary basis. Limits usually need to be set as `root` before switching to the user that will run {{es}}. For example, to set the number of open file handles (`ulimit -n`) to 65,535, you can do the following: ```sh sudo su <1> @@ -26,7 +30,7 @@ su elasticsearch <3> 1. Become `root`. 2. Change the max number of open files. -3.
Become the `elasticsearch` user in order to start Elasticsearch. +3. Become the `elasticsearch` user in order to start {{es}}. The new limit is only applied during the current session. diff --git a/deploy-manage/deploy/self-managed/setup-configuration-memory.md b/deploy-manage/deploy/self-managed/setup-configuration-memory.md index 1c60e13573..a84c980c05 100644 --- a/deploy-manage/deploy/self-managed/setup-configuration-memory.md +++ b/deploy-manage/deploy/self-managed/setup-configuration-memory.md @@ -1,19 +1,22 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html +applies_to: + deployment: + self: --- # Disable swapping [setup-configuration-memory] Most operating systems try to use as much memory as possible for file system caches and eagerly swap out unused application memory. This can result in parts of the JVM heap or even its executable pages being swapped out to disk. -Swapping is very bad for performance, for node stability, and should be avoided at all costs. It can cause garbage collections to last for **minutes** instead of milliseconds and can cause nodes to respond slowly or even to disconnect from the cluster. In a resilient distributed system, it’s more effective to let the operating system kill the node. +Swapping is very bad for performance, for node stability, and should be avoided at all costs. It can cause garbage collections to last for minutes instead of milliseconds and can cause nodes to respond slowly or even to disconnect from the cluster. In a resilient distributed system, it’s more effective to let the operating system kill the node. There are three approaches to disabling swapping. The preferred option is to completely disable swap. If this is not an option, whether or not to prefer minimizing swappiness versus memory locking is dependent on your environment. 
## Disable all swap files [disable-swap-files] -Usually Elasticsearch is the only service running on a box, and its memory usage is controlled by the JVM options. There should be no need to have swap enabled. +Usually {{es}} is the only service running on a box, and its memory usage is controlled by the JVM options. There should be no need to have swap enabled. On Linux systems, you can disable swap temporarily by running: @@ -21,7 +24,7 @@ On Linux systems, you can disable swap temporarily by running: sudo swapoff -a ``` -This doesn’t require a restart of Elasticsearch. +This doesn’t require a restart of {{es}}. To disable it permanently, you will need to edit the `/etc/fstab` file and comment out any lines that contain the word `swap`. @@ -49,11 +52,11 @@ bootstrap.memory_lock: true ``` ::::{warning} -`mlockall` might cause the JVM or shell session to exit if it tries to allocate more memory than is available! +`mlockall` might cause the JVM or shell session to exit if it tries to allocate more memory than is available. :::: -After starting Elasticsearch, you can see whether this setting was applied successfully by checking the value of `mlockall` in the output from this request: +After starting {{es}}, you can see whether this setting was applied successfully by checking the value of `mlockall` in the output from this request: ```console GET _nodes?filter_path=**.mlockall @@ -61,7 +64,7 @@ GET _nodes?filter_path=**.mlockall If you see that `mlockall` is `false`, then it means that the `mlockall` request has failed. You will also see a line with more information in the logs with the words `Unable to lock JVM Memory`. -The most probable reason, on Linux/Unix systems, is that the user running Elasticsearch doesn’t have permission to lock memory. This can be granted as follows: +The most probable reason, on Linux/Unix systems, is that the user running {{es}} doesn’t have permission to lock memory. 
This can be granted as follows: `.zip` and `.tar.gz` : Set [`ulimit -l unlimited`](setting-system-settings.md#ulimit) as root before starting {{es}}. Alternatively, set `memlock` to `unlimited` in `/etc/security/limits.conf`: diff --git a/deploy-manage/deploy/self-managed/system-config-tcpretries.md b/deploy-manage/deploy/self-managed/system-config-tcpretries.md index 91a3e83e40..1b94ddeb34 100644 --- a/deploy-manage/deploy/self-managed/system-config-tcpretries.md +++ b/deploy-manage/deploy/self-managed/system-config-tcpretries.md @@ -1,9 +1,14 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config-tcpretries.html + --- -# TCP retransmission timeout [system-config-tcpretries] +# Decrease the TCP retransmission timeout [system-config-tcpretries] + +::::{note} +This is only relevant for Linux. +:::: Each pair of {{es}} nodes communicates via a number of TCP connections which [remain open](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#long-lived-connections) until one of the nodes shuts down or communication between the nodes is disrupted by a failure in the underlying infrastructure. diff --git a/deploy-manage/deploy/self-managed/vm-max-map-count.md b/deploy-manage/deploy/self-managed/vm-max-map-count.md index 808376a7da..f9f32f173b 100644 --- a/deploy-manage/deploy/self-managed/vm-max-map-count.md +++ b/deploy-manage/deploy/self-managed/vm-max-map-count.md @@ -1,11 +1,14 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html +applies_to: + deployment: + self: --- -# Virtual memory [vm-max-map-count] +# Increase virtual memory [vm-max-map-count] -Elasticsearch uses a [`mmapfs`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/store.md#mmapfs) directory by default to store its indices. 
The default operating system limits on mmap counts is likely to be too low, which may result in out of memory exceptions. +{{es}} uses a [`mmapfs`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/store.md#mmapfs) directory by default to store its indices. The default operating system limits on mmap counts are likely to be too low, which may result in out of memory exceptions. On Linux, you can increase the limits by running the following command as `root`: @@ -17,7 +20,7 @@ To set this value permanently, update the `vm.max_map_count` setting in `/etc/sy The RPM and Debian packages will configure this setting automatically. No further configuration is required. -You can find out the current mmap count of a running Elasticsearch process using the following command, where `$PID` is the process ID of the running Elasticsearch process: +You can find out the current mmap count of a running {{es}} process using the following command, where `$PID` is the process ID of the running {{es}} process: ```sh wc -l /proc/$PID/maps diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md deleted file mode 100644 index e627ecbbba..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md +++ /dev/null @@ -1,48 +0,0 @@ -# Bootstrap Checks for {{xpack}} [bootstrap-checks-xpack] - -In addition to the [{{es}} bootstrap checks](../../../deploy-manage/deploy/self-managed/bootstrap-checks.md), there are checks that are specific to {{xpack}} features. - - -## Encrypt sensitive data check [bootstrap-checks-xpack-encrypt-sensitive-data] - -If you use {{watcher}} and have chosen to encrypt sensitive data (by setting `xpack.watcher.encrypt_sensitive_data` to `true`), you must also place a key in the secure settings store.
- -To pass this bootstrap check, you must set the `xpack.watcher.encryption_key` on each node in the cluster. For more information, see [Encrypting sensitive data in Watcher](../../../explore-analyze/alerts-cases/watcher/encrypting-data.md). - - -## PKI realm check [bootstrap-checks-xpack-pki-realm] - -If you use {{es}} {{security-features}} and a Public Key Infrastructure (PKI) realm, you must configure Transport Layer Security (TLS) on your cluster and enable client authentication on the network layers (either transport or http). For more information, see [PKI user authentication](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md) and [Set up basic security plus HTTPS](../../../deploy-manage/security/set-up-basic-security-plus-https.md). - -To pass this bootstrap check, if a PKI realm is enabled, you must configure TLS and enable client authentication on at least one network communication layer. - - -## Role mappings check [bootstrap-checks-xpack-role-mappings] - -If you authenticate users with realms other than `native` or `file` realms, you must create role mappings. These role mappings define which roles are assigned to each user. - -If you use files to manage the role mappings, you must configure a YAML file and copy it to each node in the cluster. By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`. Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. For more information, see [Using role mapping files](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-file). - -To pass this bootstrap check, the role mapping files must exist and must be valid. The Distinguished Names (DNs) that are listed in the role mappings files must also be valid. 
- - -## SSL/TLS check [bootstrap-checks-tls] - -If you enable {{es}} {{security-features}}, unless you have a trial license, you must configure SSL/TLS for internode-communication. - -::::{note} -Single-node clusters that use a loopback interface do not have this requirement. For more information, see [*Start the {{stack}} with security enabled automatically*](../../../deploy-manage/security/security-certificates-keys.md). -:::: - - -To pass this bootstrap check, you must [set up SSL/TLS in your cluster](../../../deploy-manage/security/set-up-basic-security.md#encrypt-internode-communication). - - -## Token SSL check [bootstrap-checks-xpack-token-ssl] - -If you use {{es}} {{security-features}} and the built-in token service is enabled, you must configure your cluster to use SSL/TLS for the HTTP interface. HTTPS is required in order to use the token service. - -In particular, if `xpack.security.authc.token.enabled` is set to `true` in the `elasticsearch.yml` file, you must also set `xpack.security.http.ssl.enabled` to `true`. For more information about these settings, see [Security settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md) and [Advanced HTTP settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings). - -To pass this bootstrap check, you must enable HTTPS or disable the built-in token service. 
- diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/install-elasticsearch.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/install-elasticsearch.md deleted file mode 100644 index 138b35bf61..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/install-elasticsearch.md +++ /dev/null @@ -1,88 +0,0 @@ -# Installing Elasticsearch [install-elasticsearch] - - -## {{ecloud}} [hosted-elasticsearch-service] - -{{ecloud}} offers all of the features of {{es}}, {{kib}}, and Elastic’s {{observability}}, and {{elastic-sec}} solutions as a hosted service available on AWS, GCP, and Azure. - -To set up Elasticsearch in {{ecloud}}, sign up for a [free {{ecloud}} trial](https://cloud.elastic.co/registration?page=docs&placement=docs-body). - - -## Self-managed {{es}} options [elasticsearch-deployment-options] - -If you want to install and manage {{es}} yourself, you can: - -* Run {{es}} using a [Linux, MacOS, or Windows install package](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). -* Run {{es}} in a [Docker container](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). -* Set up and manage {{es}}, {{kib}}, {{agent}}, and the rest of the Elastic Stack on Kubernetes with [{{eck}}](https://www.elastic.co/guide/en/cloud-on-k8s/current). - -::::{tip} -To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see [Run Elasticsearch locally](../../../solutions/search/get-started.md). Please note that this setup is **not suitable for production use**. -:::: - - - -## Elasticsearch install packages [elasticsearch-install-packages] - -Elasticsearch is provided in the following package formats: - -Linux and MacOS `tar.gz` archives -: The `tar.gz` archives are available for installation on any Linux distribution and MacOS. 
- - [Install {{es}} from archive on Linux or MacOS](../../../deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md) - - -Windows `.zip` archive -: The `zip` archive is suitable for installation on Windows. - - [Install {{es}} with `.zip` on Windows](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md) - - -`deb` -: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elasticsearch website or from our Debian repository. - - [Install Elasticsearch with Debian Package](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md) - - -`rpm` -: The `rpm` package is suitable for installation on Red Hat, Centos, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elasticsearch website or from our RPM repository. - - [Install Elasticsearch with RPM](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md) - - -::::{tip} -For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md). -:::: - - - -## Elasticsearch container images [elasticsearch-docker-images] - -You can also run {{es}} inside a container image. - -`docker` -: Docker container images may be downloaded from the Elastic Docker Registry. - - [Install {{es}} with Docker](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md) - - - -## Java (JVM) Version [jvm-version] - -{{es}} is built using Java, and includes a bundled version of [OpenJDK](https://openjdk.java.net) within each distribution. We strongly recommend using the bundled JVM in all installations of {{es}}. - -The bundled JVM is treated the same as any other dependency of {{es}} in terms of support and maintenance. 
This means that Elastic takes responsibility for keeping it up to date, and reacts to security issues and bug reports as needed to address vulnerabilities and other bugs in {{es}}. Elastic’s support of the bundled JVM is subject to Elastic’s [support policy](https://www.elastic.co/support_policy) and [end-of-life schedule](https://www.elastic.co/support/eol) and is independent of the support policy and end-of-life schedule offered by the original supplier of the JVM. Elastic does not support using the bundled JVM for purposes other than running {{es}}. - -::::{tip} -{{es}} uses only a subset of the features offered by the JVM. Bugs and security issues in the bundled JVM often relate to features that {{es}} does not use. Such issues do not apply to {{es}}. Elastic analyzes reports of security vulnerabilities in all its dependencies, including in the bundled JVM, and will issue an [Elastic Security Advisory](https://www.elastic.co/community/security) if such an advisory is needed. -:::: - - -If you decide to run {{es}} using a version of Java that is different from the bundled one, prefer to use the latest release of a [LTS version of Java](https://www.oracle.com/technetwork/java/eol-135779.md) which is [listed in the support matrix](https://elastic.co/support/matrix). Although such a configuration is supported, if you encounter a security issue or other bug in your chosen JVM then Elastic may not be able to help unless the issue is also present in the bundled JVM. Instead, you must seek assistance directly from the supplier of your chosen JVM. You must also take responsibility for reacting to security and bug announcements from the supplier of your chosen JVM. {{es}} may not perform optimally if using a JVM other than the bundled one. {{es}} is closely coupled to certain OpenJDK-specific features, so it may not work correctly with JVMs that are not OpenJDK. {{es}} will refuse to start if you attempt to use a known-bad JVM version. 
- -To use your own version of Java, set the `ES_JAVA_HOME` environment variable to the path to your own JVM installation. The bundled JVM is located within the `jdk` subdirectory of the {{es}} home directory. You may remove this directory if using your own JVM. - - -## JVM and Java agents [jvm-agents] - -Don’t use third-party Java agents that attach to the JVM. These agents can reduce {{es}} performance, including freezing or crashing nodes. From 334c967e643221bb087288eda3ed556a41abd927 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Tue, 4 Mar 2025 08:52:02 -0500 Subject: [PATCH 02/43] more --- .../self-managed/_snippets/install-next-steps.md | 7 +++++++ .../install-elasticsearch-with-zip-on-windows.md | 12 ++---------- 2 files changed, 9 insertions(+), 10 deletions(-) create mode 100644 deploy-manage/deploy/self-managed/_snippets/install-next-steps.md diff --git a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md new file mode 100644 index 0000000000..cf53c9a06b --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md @@ -0,0 +1,7 @@ +## Next steps [_next_steps_2] + +You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: + +* Learn how to [configure {{es}}](configure-elasticsearch.md). +* Configure [important {{es}} settings](important-settings-configuration.md). +* Configure [important system settings](important-system-configuration.md). 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 74b26b6cb1..51d3ab684f 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -8,8 +8,6 @@ sub: # Install {{es}} with .zip on Windows [zip-windows] -{{es-conf}} - {{es}} can be installed on Windows using the Windows `.zip` archive. This comes with a `elasticsearch-service.bat` command which will setup {{es}} to run as a service. This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. @@ -215,11 +213,5 @@ This is very convenient because you don’t have to create any directories to st | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `%ES_HOME%\plugins` | | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | - -## Next steps [_next_steps_2] - -You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: - -* Learn how to [configure {{es}}](configure-elasticsearch.md). -* Configure [important {{es}} settings](important-settings-configuration.md). -* Configure [important system settings](important-system-configuration.md). 
+:::{include} _snippets/install-next-steps.md +::: \ No newline at end of file From 47056c74e2678ace7d4c23207737fbfea5097ac8 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Tue, 4 Mar 2025 15:44:39 -0500 Subject: [PATCH 03/43] more --- .../deploy/self-managed/_snippets/ca-cert.md | 7 + .../self-managed/_snippets/ca-fingerprint.md | 14 + .../_snippets/check-es-running.md | 4 +- .../self-managed/_snippets/connect-clients.md | 33 +- .../self-managed/_snippets/enroll-nodes.md | 29 ++ .../_snippets/etc-elasticsearch.md | 23 ++ .../_snippets/install-next-steps.md | 2 - .../_snippets/join-existing-cluster.md | 18 + .../self-managed/_snippets/other-versions.md | 1 + .../deploy/self-managed/_snippets/pgp-key.md | 6 + .../self-managed/_snippets/security-files.md | 24 ++ .../_snippets/skip-set-kernel-params.md | 3 + .../_snippets/start-security-enabled.md | 14 + .../deploy/self-managed/_snippets/systemd.md | 75 ++++ .../self-managed/_snippets/targz-daemon.md | 19 + .../self-managed/_snippets/targz-start.md | 29 ++ .../_snippets/zip-windows-start.md | 37 +- .../deploy/self-managed/bootstrap-checks.md | 12 +- .../deploy/self-managed/configure.md | 10 +- ...asticsearch-from-archive-on-linux-macos.md | 266 +++---------- ...stall-elasticsearch-with-debian-package.md | 349 ++++-------------- .../install-elasticsearch-with-docker.md | 50 +-- .../install-elasticsearch-with-rpm.md | 331 +++-------------- ...stall-elasticsearch-with-zip-on-windows.md | 57 ++- .../install-from-archive-on-linux-macos.md | 41 +- .../deploy/self-managed/install-on-windows.md | 26 +- .../install-with-debian-package.md | 66 +++- .../self-managed/install-with-docker.md | 38 +- .../deploy/self-managed/install-with-rpm.md | 56 ++- .../self-managed/installing-elasticsearch.md | 45 +-- .../self-managed/system-config-tcpretries.md | 4 +- deploy-manage/toc.yml | 15 - docset.yml | 1 + .../bootstrap-checks.md | 39 -- raw-migrated-files/toc.yml | 3 - 35 files changed, 672 insertions(+), 1075 deletions(-) create mode 
100644 deploy-manage/deploy/self-managed/_snippets/ca-cert.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/join-existing-cluster.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/other-versions.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/pgp-key.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/security-files.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/systemd.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/targz-daemon.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/targz-start.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks.md diff --git a/deploy-manage/deploy/self-managed/_snippets/ca-cert.md b/deploy-manage/deploy/self-managed/_snippets/ca-cert.md new file mode 100644 index 0000000000..f1636a8139 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/ca-cert.md @@ -0,0 +1,7 @@ +If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: + +```sh +{{es-conf}}{{slash}}certs{{slash}}http_ca.crt +``` + +Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md b/deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md new file mode 100644 index 0000000000..413d05829d --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md @@ -0,0 +1,14 @@ +Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. + +If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. + +```sh +openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt +``` + +The command returns the security certificate, including the fingerprint. The `issuer` should be `{{es}} security auto-configuration HTTP CA`. + +```sh +issuer= /CN={{es}} security auto-configuration HTTP CA +SHA256 Fingerprint= +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/check-es-running.md b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md index 615a52ed5d..3a140511bf 100644 --- a/deploy-manage/deploy/self-managed/_snippets/check-es-running.md +++ b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md @@ -1,9 +1,7 @@ -## Check that {{es}} is running [_check_that_elasticsearch_is_running_2] - You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: ```sh -curl --cacert %ES_HOME%\config\certs\http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> +curl --cacert {{es-conf}}{{slash}}certs{{slash}}http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> ``` 1. 
Ensure that you use `https` in your call, or the request will fail.`--cacert` diff --git a/deploy-manage/deploy/self-managed/_snippets/connect-clients.md b/deploy-manage/deploy/self-managed/_snippets/connect-clients.md index f2d8aa9613..9b13dd80b9 100644 --- a/deploy-manage/deploy/self-managed/_snippets/connect-clients.md +++ b/deploy-manage/deploy/self-managed/_snippets/connect-clients.md @@ -1,5 +1,3 @@ -## Connect clients to {{es}} [_connect_clients_to_es_2] - % This file is reused in each of the installation pages. Ensure that any changes % you make to this file are applicable across all installation environments. @@ -11,33 +9,4 @@ When you start {{es}} for the first time, TLS is configured automatically for th The hex-encoded SHA-256 fingerprint of this certificate is also output to the terminal. Any clients that connect to {{es}}, such as the [{{es}} Clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html), {{beats}}, standalone {{agent}}s, and {{ls}} must validate that they trust the certificate that {{es}} uses for HTTPS. {{fleet-server}} and {{fleet}}-managed {{agent}}s are automatically configured to trust the CA certificate. Other clients can establish trust by using either the fingerprint of the CA certificate or the CA certificate itself. -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. - - -### Use the CA fingerprint [_use_the_ca_fingerprint_2] - -Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. 
- -```sh -openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt -``` - -The command returns the security certificate, including the fingerprint. The `issuer` should be `Elasticsearch security auto-configuration HTTP CA`. - -```sh -issuer= /CN=Elasticsearch security auto-configuration HTTP CA -SHA256 Fingerprint= -``` - - -### Use the CA certificate [_use_the_ca_certificate_2] - -If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: - -```sh -{{es-conf}}{{slash}}certs{{slash}}http_ca.crt -``` - -Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. \ No newline at end of file +If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md new file mode 100644 index 0000000000..272238e259 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md @@ -0,0 +1,29 @@ +When {{es}} starts for the first time, the security auto-configuration process binds the HTTP layer to `0.0.0.0`, but only binds the transport layer to localhost. This intended behavior ensures that you can start a single-node cluster with security enabled by default without any additional configuration. + +Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. 
+ +Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. + +To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. + +1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. + + ```sh + bin{{slash}}elasticsearch-create-enrollment-token -s node + ``` + + Copy the enrollment token, which you’ll use to enroll new nodes with your {{es}} cluster. + +2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. + + ```sh + bin{{slash}}elasticsearch --enrollment-token + ``` + + {{es}} automatically generates certificates and keys in the following directory: + + ```sh + config{{slash}}certs + ``` + +3. Repeat the previous step for any new nodes that you want to enroll. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md b/deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md new file mode 100644 index 0000000000..44c6cc38d6 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md @@ -0,0 +1,23 @@ +The `/etc/elasticsearch` directory contains the default runtime configuration for {{es}}. The ownership of this directory and all contained files are set to `root:elasticsearch` on package installations. + +The `setgid` flag applies group permissions on the `/etc/elasticsearch` directory to ensure that {{es}} can read any contained files and subdirectories. All files and subdirectories inherit the `root:elasticsearch` ownership. Running commands from this directory or any subdirectories, such as the [elasticsearch-keystore tool](../../security/secure-settings.md), requires `root:elasticsearch` permissions. + +{{es}} loads its configuration from the `/etc/elasticsearch/elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). + +The {{distro}} package also has a system configuration file (`/etc/sysconfig/elasticsearch`), which allows you to set the following parameters: + +`ES_JAVA_HOME` +: Set a custom Java path to be used. + +`ES_PATH_CONF` +: Configuration file directory (which needs to include `elasticsearch.yml`, `jvm.options`, and `log4j2.properties` files); defaults to `/etc/elasticsearch`. + +`ES_JAVA_OPTS` +: Any additional JVM system properties you may want to apply. + +`RESTART_ON_UPGRADE` +: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your {{es}} instance after installing a package manually. The reason for this is to ensure that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster.
+ +::::{note} +Distributions that use `systemd` require that system resource limits be configured via `systemd` rather than via the `/etc/sysconfig/elasticsearch` file. See [Systemd configuration](setting-system-settings.md#systemd) for more information. +:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md index cf53c9a06b..3f45e867fe 100644 --- a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md +++ b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md @@ -1,5 +1,3 @@ -## Next steps [_next_steps_2] - You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: * Learn how to [configure {{es}}](configure-elasticsearch.md). diff --git a/deploy-manage/deploy/self-managed/_snippets/join-existing-cluster.md b/deploy-manage/deploy/self-managed/_snippets/join-existing-cluster.md new file mode 100644 index 0000000000..9edb5fe2fb --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/join-existing-cluster.md @@ -0,0 +1,18 @@ +When you install {{es}}, the installation process configures a single-node cluster by default. If you want a node to join an existing cluster instead, generate an enrollment token on an existing node *before* you start the new node for the first time. + +1. On any node in your existing cluster, generate a node enrollment token: + + ```sh + /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node + ``` + +2. Copy the enrollment token, which is output to your terminal. +3. On your new {{es}} node, pass the enrollment token as a parameter to the `elasticsearch-reconfigure-node` tool: + + ```sh + /usr/share/elasticsearch/bin/elasticsearch-reconfigure-node --enrollment-token + ``` + + {{es}} is now configured to join the existing cluster. + +4. 
[Start your new node using `systemd`](#running-systemd). \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/other-versions.md b/deploy-manage/deploy/self-managed/_snippets/other-versions.md new file mode 100644 index 0000000000..8a743ccdbe --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/other-versions.md @@ -0,0 +1 @@ +The latest stable version of {{es}} can be found on the [Download {{es}}](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/pgp-key.md b/deploy-manage/deploy/self-managed/_snippets/pgp-key.md new file mode 100644 index 0000000000..2b16de2f6a --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/pgp-key.md @@ -0,0 +1,6 @@ +We sign all of our packages with the {{es}} signing key (PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), available from [https://pgp.mit.edu](https://pgp.mit.edu)) with fingerprint: + +``` +4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4 +``` +Download and install the public signing key: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/security-files.md b/deploy-manage/deploy/self-managed/_snippets/security-files.md new file mode 100644 index 0000000000..8aae4c831b --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/security-files.md @@ -0,0 +1,24 @@ +When you install {{es}}, the following certificates and keys are generated in the {{es}} configuration directory, which are used to connect a {{kib}} instance to your secured {{es}} cluster and to encrypt internode communication. The files are listed here for reference. + +`http_ca.crt` +: The CA certificate that is used to sign the certificates for the HTTP layer of this {{es}} cluster. 
+ +`http.p12` +: Keystore that contains the key and certificate for the HTTP layer for this node. + +`transport.p12` +: Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. + +`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. + +Use the following command to retrieve the password for `http.p12`: + +```sh +bin/elasticsearch-keystore show xpack.security.http.ssl.keystore.secure_password +``` + +Use the following command to retrieve the password for `transport.p12`: + +```sh +bin/elasticsearch-keystore show xpack.security.transport.ssl.keystore.secure_password +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md b/deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md new file mode 100644 index 0000000000..db758386bd --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md @@ -0,0 +1,3 @@ +::::{note} +On systemd-based distributions, the installation scripts will attempt to set kernel parameters (e.g., `vm.max_map_count`); you can skip this by masking the systemd-sysctl.service unit. +:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md b/deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md new file mode 100644 index 0000000000..1ac869269e --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md @@ -0,0 +1,14 @@ +When installing {{es}}, security features are enabled and configured by default.
+ +When you start {{es}} for the first time, the following security configuration occurs automatically: + +* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. +* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. + +The password and certificate and keys are output to your terminal. You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) command. + +We recommend storing the `elastic` password as an environment variable in your shell. For example: + +```sh +export ELASTIC_PASSWORD="your_password" +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/systemd.md b/deploy-manage/deploy/self-managed/_snippets/systemd.md new file mode 100644 index 0000000000..5de8d1943c --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/systemd.md @@ -0,0 +1,75 @@ +To configure {{es}} to start automatically when the system boots up, run the following commands: + +```sh +sudo /bin/systemctl daemon-reload +sudo /bin/systemctl enable elasticsearch.service +``` + +{{es}} can be started and stopped as follows: + +```sh +sudo systemctl start elasticsearch.service +sudo systemctl stop elasticsearch.service +``` + +These commands provide no feedback as to whether {{es}} was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. + +If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once {{es}} is up and running. 
+ +```sh +echo "keystore_password" > /path/to/my_pwd_file.tmp +chmod 600 /path/to/my_pwd_file.tmp +sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.tmp +sudo systemctl start elasticsearch.service +``` + +By default the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. + +When `systemd` logging is enabled, the logging information is available using the `journalctl` commands: + +To tail the journal: + +```sh +sudo journalctl -f +``` + +To list journal entries for the elasticsearch service: + +```sh +sudo journalctl --unit elasticsearch +``` + +To list journal entries for the elasticsearch service starting from a given time: + +```sh +sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" +``` + +Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.html) for more command line options. + +::::{admonition} Startup timeouts with older `systemd` versions +:class: tip + +By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. + +Versions of `systemd` prior to 238 do not support the timeout extension mechanism and will terminate the {{es}} process if it has not fully started up within the configured timeout. If this happens, {{es}} will report in its logs that it was shut down normally a short time after it started: + +```text +[2022-01-31T01:22:31,077][INFO ][o.e.n.Node ] [instance-0000000123] starting ... +... +[2022-01-31T01:37:15,077][INFO ][o.e.n.Node ] [instance-0000000123] stopping ...
+``` + +However the `systemd` logs will report that the startup timed out: + +```text +Jan 31 01:22:30 debian systemd[1]: Starting {{es}}... +Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. +Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM +Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. +Jan 31 01:37:15 debian systemd[1]: Failed to start {{es}}. +``` + +To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. + +:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md b/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md new file mode 100644 index 0000000000..3ff887328d --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md @@ -0,0 +1,19 @@ +To run {{es}} as a daemon, specify `-d` on the command line, and record the process ID in a file using the `-p` option: + +```sh +./bin/elasticsearch -d -p pid +``` + +If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. + +Log messages can be found in the `$ES_HOME/logs/` directory. + +To shut down {{es}}, kill the process ID recorded in the `pid` file: + +```sh +pkill -F pid +``` + +::::{note} +The {{es}} `.tar.gz` package does not include the `systemd` module. To manage {{es}} as a service, use the [Debian](../../maintenance/start-stop-services/start-stop-elasticsearch.md#start-deb) or [RPM](../../maintenance/start-stop-services/start-stop-elasticsearch.md#start-rpm) package instead. 
+:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/targz-start.md b/deploy-manage/deploy/self-managed/_snippets/targz-start.md new file mode 100644 index 0000000000..7c50d49b49 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/targz-start.md @@ -0,0 +1,29 @@ +Run the following command to start {{es}} from the command line: + +```sh +./bin/elasticsearch +``` + +When starting {{es}} for the first time, security features are enabled and configured by default. The following security configuration occurs automatically: + +* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. +* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. +* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. + +The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. + +We recommend storing the `elastic` password as an environment variable in your shell. Example: + +```sh +export ELASTIC_PASSWORD="your_password" +``` + +If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. + +By default {{es}} prints its logs to the console (`stdout`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. + +To stop {{es}}, press `Ctrl-C`. 
+ +::::{note} +All scripts packaged with {{es}} require a version of Bash that supports arrays and assume that Bash is available at `/bin/bash`. As such, Bash should be available at this path either directly or via a symbolic link. +:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md index a138ab9582..76d6c23bc1 100644 --- a/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md +++ b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md @@ -1,5 +1,3 @@ -## Run {{es}} from the command line [windows-running] - Run the following command to start {{es}} from the command line: ```sh @@ -24,37 +22,4 @@ If you have password-protected the {{es}} keystore, you will be prompted to ente By default {{es}} prints its logs to the console (`STDOUT`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. -To stop {{es}}, press `Ctrl-C`. - - -### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] - -When {{es}} starts for the first time, the security auto-configuration process binds the HTTP layer to `0.0.0.0`, but only binds the transport layer to localhost. This intended behavior ensures that you can start a single-node cluster with security enabled by default without any additional configuration. - -Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. 
During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. - -Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. - -To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. - -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. - - ```sh - bin\elasticsearch-create-enrollment-token -s node - ``` - - Copy the enrollment token, which you’ll use to enroll new nodes with your {{es}} cluster. - -2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. - - ```sh - bin\elasticsearch --enrollment-token - ``` - - {{es}} automatically generates certificates and keys in the following directory: - - ```sh - config\certs - ``` - -3. Repeat the previous step for any new nodes that you want to enroll. 
\ No newline at end of file +To stop {{es}}, press `Ctrl-C`. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks.md b/deploy-manage/deploy/self-managed/bootstrap-checks.md index f085c97c6b..1b73aed78f 100644 --- a/deploy-manage/deploy/self-managed/bootstrap-checks.md +++ b/deploy-manage/deploy/self-managed/bootstrap-checks.md @@ -23,7 +23,7 @@ applies_to: # Bootstrap checks [bootstrap-checks] -{{es}} has bootstrap checks that run at startup to ensure that users have configured all [important settings](../../../deploy-manage/deploy/self-managed/important-settings-configuration.md). +{{es}} has bootstrap checks that run at startup to ensure that users have configured all [important settings](/deploy-manage/deploy/self-managed/important-settings-configuration.md). These bootstrap checks inspect a variety of {{es}} and system settings and compare them to values that are safe for the operation of {{es}}. If {{es}} is in development mode, any bootstrap checks that fail appear as warnings in the {{es}} log. If {{es}} is in production mode, any bootstrap checks that fail will cause {{es}} to refuse to start. @@ -31,7 +31,7 @@ There are some bootstrap checks that are always enforced to prevent {{es}} from ## Development vs. production mode [dev-vs-prod-mode] -By default, {{es}} binds to loopback addresses for [HTTP and transport (internal) communication](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md). This is fine for downloading and playing with {{es}} as well as everyday development, but it’s useless for production systems. To join a cluster, an {{es}} node must be reachable via transport communication. To join a cluster via a non-loopback address, a node must bind transport to a non-loopback address and not be using [single-node discovery](../../../deploy-manage/deploy/self-managed/bootstrap-checks.md#single-node-discovery). 
Thus, we consider an {{es}} node to be in development mode if it can not form a cluster with another machine via a non-loopback address, and is otherwise in production mode if it can join a cluster via non-loopback addresses. +By default, {{es}} binds to loopback addresses for [HTTP and transport (internal) communication](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md). This is fine for downloading and playing with {{es}} as well as everyday development, but it’s useless for production systems. To join a cluster, an {{es}} node must be reachable via transport communication. To join a cluster via a non-loopback address, a node must bind transport to a non-loopback address and not be using [single-node discovery](/deploy-manage/deploy/self-managed/bootstrap-checks.md#single-node-discovery). Thus, we consider an {{es}} node to be in development mode if it cannot form a cluster with another machine via a non-loopback address, and is otherwise in production mode if it can join a cluster via non-loopback addresses. Note that HTTP and transport can be configured independently via [`http.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings) and [`transport.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings). This can be useful for configuring a single node to be reachable via HTTP for testing purposes without triggering production mode. @@ -190,7 +190,7 @@ To pass this bootstrap check, you must set the `xpack.watcher.encryption_key` on $$$bootstrap-checks-xpack-pki-realm$$$ -If you use {{es}} {{security-features}} and a Public Key Infrastructure (PKI) realm, you must configure Transport Layer Security (TLS) on your cluster and enable client authentication on the network layers (either transport or http). 
For more information, see [PKI user authentication](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md) and [Set up basic security plus HTTPS](../../../deploy-manage/security/set-up-basic-security-plus-https.md). +If you use {{es}} {{security-features}} and a Public Key Infrastructure (PKI) realm, you must configure Transport Layer Security (TLS) on your cluster and enable client authentication on the network layers (either transport or http). For more information, see [PKI user authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/pki.md) and [Set up basic security plus HTTPS](/deploy-manage/security/set-up-basic-security-plus-https.md). To pass this bootstrap check, if a PKI realm is enabled, you must configure TLS and enable client authentication on at least one network communication layer. ::: @@ -201,7 +201,7 @@ $$$bootstrap-checks-xpack-role-mappings$$$ If you authenticate users with realms other than `native` or `file` realms, you must create role mappings. These role mappings define which roles are assigned to each user. -If you use files to manage the role mappings, you must configure a YAML file and copy it to each node in the cluster. By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`. Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. For more information, see [Using role mapping files](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-file). +If you use files to manage the role mappings, you must configure a YAML file and copy it to each node in the cluster. By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`. Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. 
For more information, see [Using role mapping files](/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-file). To pass this bootstrap check, the role mapping files must exist and must be valid. The Distinguished Names (DNs) that are listed in the role mappings files must also be valid. ::: @@ -213,10 +213,10 @@ $$$bootstrap-checks-tls$$$ If you enable {{es}} {{security-features}}, unless you have a trial license, you must configure SSL/TLS for internode-communication. :::{note} -Single-node clusters that use a loopback interface do not have this requirement. For more information, see [*Start the {{stack}} with security enabled automatically*](../../../deploy-manage/security/security-certificates-keys.md). +Single-node clusters that use a loopback interface do not have this requirement. For more information, see [*Start the {{stack}} with security enabled automatically*](/deploy-manage/security/security-certificates-keys.md). ::: -To pass this bootstrap check, you must [set up SSL/TLS in your cluster](../../../deploy-manage/security/set-up-basic-security.md#encrypt-internode-communication). +To pass this bootstrap check, you must [set up SSL/TLS in your cluster](/deploy-manage/security/set-up-basic-security.md#encrypt-internode-communication). :::: :::{dropdown} Token SSL check diff --git a/deploy-manage/deploy/self-managed/configure.md b/deploy-manage/deploy/self-managed/configure.md index 3b3dff59bb..ad13d7ebe4 100644 --- a/deploy-manage/deploy/self-managed/configure.md +++ b/deploy-manage/deploy/self-managed/configure.md @@ -13,7 +13,7 @@ KBN_PATH_CONF=/home/kibana/config ./bin/kibana The default host and port settings configure {{kib}} to run on `localhost:5601`. To change this behavior and allow remote users to connect, you’ll need to update your `kibana.yml` file. You can also enable SSL and set a variety of other options. -Environment variables can be injected into configuration using `${MY_ENV_VAR}` syntax. 
By default, configuration validation will fail if an environment variable used in the config file is not present when Kibana starts. This behavior can be changed by using a default value for the environment variable, using the `${MY_ENV_VAR:defaultValue}` syntax. +Environment variables can be injected into configuration using `${MY_ENV_VAR}` syntax. By default, configuration validation will fail if an environment variable used in the config file is not present when {{kib}} starts. This behavior can be changed by using a default value for the environment variable, using the `${MY_ENV_VAR:defaultValue}` syntax. `console.ui.enabled` : Toggling this causes the server to regenerate assets on the next startup, which may cause a delay before pages start being served. Set to `false` to disable Console. **Default: `true`** @@ -24,7 +24,7 @@ Environment variables can be injected into configuration using `${MY_ENV_VAR}` s `csp.disableUnsafeEval` : [8.7.0] Set this to `false` to add the [`unsafe-eval`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src#unsafe_eval_expressions) source expression to the `script-src` directive. **Default: `true`** - When `csp.disableUnsafeEval` is set to `true`, Kibana will use a custom version of the Handlebars template library. Handlebars is used in various locations in the Kibana frontend where custom templates can be supplied by the user when for instance setting up a visualisation. If you experience any issues rendering Handlebars templates, please set this setting to `false` and [open an issue](https://github.com/elastic/kibana/issues/new/choose) in the Kibana GitHub repository. + When `csp.disableUnsafeEval` is set to `true`, {{kib}} will use a custom version of the Handlebars template library. Handlebars is used in various locations in the {{kib}} frontend where custom templates can be supplied by the user when for instance setting up a visualisation. 
If you experience any issues rendering Handlebars templates, please set this setting to `false` and [open an issue](https://github.com/elastic/kibana/issues/new/choose) in the {{kib}} GitHub repository. `csp.worker_src` @@ -96,7 +96,7 @@ $$$elasticsearch-hosts$$$ `elasticsearch.hosts:` $$$elasticsearch-publicBaseUrl$$$ `elasticsearch.publicBaseUrl:` -: The URL through which {{es}} is publicly accessible, if any. This will be shown to users in Kibana when they need connection details for your {{es}} cluster. +: The URL through which {{es}} is publicly accessible, if any. This will be shown to users in {{kib}} when they need connection details for your {{es}} cluster. $$$elasticsearch-pingTimeout$$$ `elasticsearch.pingTimeout` : Time in milliseconds to wait for {{es}} to respond to pings. **Default: the value of the [`elasticsearch.requestTimeout`](#elasticsearch-requestTimeout) setting** @@ -289,7 +289,7 @@ $$$server-basePath$$$ `server.basePath` : Enables you to specify a path to mount {{kib}} at if you are running behind a proxy. Use the [`server.rewriteBasePath`](#server-rewriteBasePath) setting to tell {{kib}} if it should remove the basePath from requests it receives, and to prevent a deprecation warning at startup. This setting cannot end in a slash (`/`). $$$server-publicBaseUrl$$$ `server.publicBaseUrl` -: The publicly available URL that end-users access Kibana at. Must include the protocol, hostname, port (if different than the defaults for `http` and `https`, 80 and 443 respectively), and the [`server.basePath`](#server-basePath) (when that setting is configured explicitly). This setting cannot end in a slash (`/`). +: The publicly available URL that end-users access {{kib}} at. Must include the protocol, hostname, port (if different than the defaults for `http` and `https`, 80 and 443 respectively), and the [`server.basePath`](#server-basePath) (when that setting is configured explicitly). This setting cannot end in a slash (`/`). 
$$$server-compression$$$ `server.compression.enabled` : Set to `false` to disable HTTP compression for all responses. **Default: `true`** @@ -337,7 +337,7 @@ $$$server-shutdownTimeout$$$ `server.shutdownTimeout` : Sets the grace period for {{kib}} to attempt to resolve any ongoing HTTP requests after receiving a `SIGTERM`/`SIGINT` signal, and before shutting down. Any new HTTP requests received during this period are rejected, because the incoming socket is closed without further processing. **Default: `30s`** $$$server-host$$$ `server.host` -: This setting specifies the host of the back end server. To allow remote users to connect, set the value to the IP address or DNS name of the {{kib}} server. Use `0.0.0.0` to make Kibana listen on all IPs (public and private). **Default: `"localhost"`** +: This setting specifies the host of the back end server. To allow remote users to connect, set the value to the IP address or DNS name of the {{kib}} server. Use `0.0.0.0` to make {{kib}} listen on all IPs (public and private). **Default: `"localhost"`** `server.keepaliveTimeout` : The number of milliseconds to wait for additional data before restarting the [`server.socketTimeout`](#server-socketTimeout) counter. 
**Default: `"120000"`** diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index f229181b9a..da2f0c1c82 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -1,6 +1,10 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/targz.html +sub: + es-conf: "$ES_HOME/config" + slash: "/" +navigation_title: "Linux or MacOS" --- # Install {{es}} from archive on Linux or MacOS [targz] @@ -10,7 +14,8 @@ mapped_pages: :::{include} _snippets/trial.md ::: -The latest stable version of {{es}} can be found on the [Download {{es}}](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +:::{include} _snippets/other-versions.md +::: ::::{note} {{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) @@ -19,37 +24,27 @@ The latest stable version of {{es}} can be found on the [Download {{es}}](https: ## Download and install archive for Linux [install-linux] -::::{warning} -Version 9.0.0-beta1 of {{es}} has not yet been released. The archive might not be available. 
-:::: - - -The Linux archive for {{es}} v9.0.0-beta1 can be downloaded and installed as follows: +The Linux archive for {{es}} {{stack-version}} can be downloaded and installed as follows: ```sh -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-linux-x86_64.tar.gz -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-linux-x86_64.tar.gz.sha512 -shasum -a 512 -c elasticsearch-9.0.0-beta1-linux-x86_64.tar.gz.sha512 <1> -tar -xzf elasticsearch-9.0.0-beta1-linux-x86_64.tar.gz -cd elasticsearch-9.0.0-beta1/ <2> +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-linux-x86_64.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-linux-x86_64.tar.gz.sha512 +shasum -a 512 -c elasticsearch-{{stack-version}}-linux-x86_64.tar.gz.sha512 <1> +tar -xzf elasticsearch-{{stack-version}}-linux-x86_64.tar.gz +cd elasticsearch-{{stack-version}}/ <2> ``` -1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch-{{version}}-linux-x86_64.tar.gz: OK`. +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch-{{stack-version}}-linux-x86_64.tar.gz: OK`. 2. This directory is known as `$ES_HOME`. ## Download and install archive for MacOS [install-macos] -::::{warning} -Version 9.0.0-beta1 of {{es}} has not yet been released. The archive might not be available. -:::: - - ::::{admonition} macOS Gatekeeper warnings :class: important -Apple’s rollout of stricter notarization requirements affected the notarization of the 9.0.0-beta1 {{es}} artifacts. If macOS displays a dialog when you first run {{es}} that interrupts it, then you need to take an action to allow it to run. +Apple’s rollout of stricter notarization requirements affected the notarization of the {{stack-version}} {{es}} artifacts. 
If macOS displays a dialog when you first run {{es}} that interrupts it, then you need to take an action to allow it to run. To prevent Gatekeeper checks on the {{es}} files, run the following command on the downloaded .tar.gz archive or the directory to which was extracted: @@ -62,163 +57,42 @@ Alternatively, you can add a security override by following the instructions in :::: -The MacOS archive for {{es}} v9.0.0-beta1 can be downloaded and installed as follows: +The MacOS archive for {{es}} {{stack-version}} can be downloaded and installed as follows: ```sh -curl -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-darwin-x86_64.tar.gz -curl https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> -tar -xzf elasticsearch-9.0.0-beta1-darwin-x86_64.tar.gz -cd elasticsearch-9.0.0-beta1/ <2> +curl -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz +curl https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> +tar -xzf elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz +cd elasticsearch-{{stack-version}}/ <2> ``` 1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch-{{version}}-darwin-x86_64.tar.gz: OK`. 2. This directory is known as `$ES_HOME`. - - ## Enable automatic creation of system indices [targz-enable-indices] -Some commercial features automatically create indices within {{es}}. By default, {{es}} is configured to allow automatic index creation, and no additional steps are required. 
However, if you have disabled automatic index creation in {{es}}, you must configure [`action.auto_create_index`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) in `elasticsearch.yml` to allow the commercial features to create the following indices: - -```yaml -action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml* -``` - -::::{important} -If you are using [Logstash](https://www.elastic.co/products/logstash) or [Beats](https://www.elastic.co/products/beats) then you will most likely require additional index names in your `action.auto_create_index` setting, and the exact value will depend on your local configuration. If you are unsure of the correct value for your environment, you may consider setting the value to `*` which will allow automatic creation of all indices. - -:::: - - +:::{include} _snippets/enable-auto-indices.md +::: ## Run {{es}} from the command line [targz-running] -Run the following command to start {{es}} from the command line: - -```sh -./bin/elasticsearch -``` - -When starting {{es}} for the first time, security features are enabled and configured by default. The following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. -* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. - -The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. - -We recommend storing the `elastic` password as an environment variable in your shell. Example: - -```sh -export ELASTIC_PASSWORD="your_password" -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. 
See [Secure settings](../../security/secure-settings.md) for more details. - -By default {{es}} prints its logs to the console (`stdout`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. - -To stop {{es}}, press `Ctrl-C`. - -::::{note} -All scripts packaged with {{es}} require a version of Bash that supports arrays and assume that Bash is available at `/bin/bash`. As such, Bash should be available at this path either directly or via a symbolic link. -:::: - - - -### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster] - -When {{es}} starts for the first time, the security auto-configuration process binds the HTTP layer to `0.0.0.0`, but only binds the transport layer to localhost. This intended behavior ensures that you can start a single-node cluster with security enabled by default without any additional configuration. - -Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. - -Additionally, only nodes on the same host can join the cluster without additional configuration. 
If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. - -To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. - -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. - - ```sh - bin/elasticsearch-create-enrollment-token -s node - ``` - - Copy the enrollment token, which you’ll use to enroll new nodes with your {{es}} cluster. - -2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. - - ```sh - bin/elasticsearch --enrollment-token - ``` - - {{es}} automatically generates certificates and keys in the following directory: - - ```sh - config/certs - ``` +:::{include} _snippets/targz-start.md +::: -3. Repeat the previous step for any new nodes that you want to enroll. 
+### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] +:::{include} _snippets/enroll-nodes.md +::: ## Check that {{es}} is running [_check_that_elasticsearch_is_running] -You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: - -```sh -curl --cacert $ES_HOME/config/certs/http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> -``` - -1. Ensure that you use `https` in your call, or the request will fail.`--cacert` -: Path to the generated `http_ca.crt` certificate for the HTTP layer. - - - -The call returns a response like this: - -```js -{ - "name" : "Cp8oag6", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", - "version" : { - "number" : "9.0.0-SNAPSHOT", - "build_type" : "tar", - "build_hash" : "f27399d", - "build_flavor" : "default", - "build_date" : "2016-03-30T09:51:41.449Z", - "build_snapshot" : false, - "lucene_version" : "10.0.0", - "minimum_wire_compatibility_version" : "1.2.3", - "minimum_index_compatibility_version" : "1.2.3" - }, - "tagline" : "You Know, for Search" -} -``` - -Log printing to `stdout` can be disabled using the `-q` or `--quiet` option on the command line. - +:::{include} _snippets/check-es-running.md +::: ## Run as a daemon [setup-installation-daemon] -To run {{es}} as a daemon, specify `-d` on the command line, and record the process ID in a file using the `-p` option: - -```sh -./bin/elasticsearch -d -p pid -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. - -Log messages can be found in the `$ES_HOME/logs/` directory. - -To shut down {{es}}, kill the process ID recorded in the `pid` file: - -```sh -pkill -F pid -``` - -::::{note} -The {{es}} `.tar.gz` package does not include the `systemd` module. 
To manage {{es}} as a service, use the [Debian](../../maintenance/start-stop-services/start-stop-elasticsearch.md#start-deb) or [RPM](../../maintenance/start-stop-services/start-stop-elasticsearch.md#start-rpm) package instead. -:::: - - +:::{include} _snippets/targz-daemon.md +::: ## Configure {{es}} on the command line [targz-configuring] @@ -234,49 +108,20 @@ Any settings that can be specified in the config file can also be specified on t Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings such as `node.name` could be specified on the command line. :::: - - ## Connect clients to {{es}} [_connect_clients_to_es] -When you start {{es}} for the first time, TLS is configured automatically for the HTTP layer. A CA certificate is generated and stored on disk at: - -```sh -$ES_HOME/config/certs/http_ca.crt -``` - -The hex-encoded SHA-256 fingerprint of this certificate is also output to the terminal. Any clients that connect to {{es}}, such as the [{{es}} Clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html), {{beats}}, standalone {{agent}}s, and {{ls}} must validate that they trust the certificate that {{es}} uses for HTTPS. {{fleet-server}} and {{fleet}}-managed {{agent}}s are automatically configured to trust the CA certificate. Other clients can establish trust by using either the fingerprint of the CA certificate or the CA certificate itself. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. - - -#### Use the CA fingerprint [_use_the_ca_fingerprint] - -Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. 
- -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. - -```sh -openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt -``` - -The command returns the security certificate, including the fingerprint. The `issuer` should be `Elasticsearch security auto-configuration HTTP CA`. - -```sh -issuer= /CN=Elasticsearch security auto-configuration HTTP CA -SHA256 Fingerprint= -``` - - -#### Use the CA certificate [_use_the_ca_certificate] +:::{include} _snippets/connect-clients.md +::: -If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: +### Use the CA fingerprint [_use_the_ca_fingerprint_2] -```sh -$ES_HOME/config/certs/http_ca.crt -``` +:::{include} _snippets/ca-fingerprint.md +::: -Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. +### Use the CA certificate [_use_the_ca_certificate_2] +:::{include} _snippets/ca-cert.md +::: ## Directory layout of archives [targz-layout] @@ -297,37 +142,10 @@ This is very convenient because you don’t have to create any directories to st ### Security certificates and keys [_security_certificates_and_keys] -When you install {{es}}, the following certificates and keys are generated in the {{es}} configuration directory, which are used to connect a {{kib}} instance to your secured {{es}} cluster and to encrypt internode communication. The files are listed here for reference. - -`http_ca.crt` -: The CA certificate that is used to sign the certificates for the HTTP layer of this {{es}} cluster. - -`http.p12` -: Keystore that contains the key and certificate for the HTTP layer for this node. 
- -`transport.p12` -: Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. - -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. - -Use the following command to retrieve the password for `http.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.http.ssl.keystore.secure_password -``` - -Use the following command to retrieve the password for `transport.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.transport.ssl.keystore.secure_password -``` - - +:::{include} _snippets/security-files.md +::: ## Next steps [_next_steps] -You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: - -* Learn how to [configure {{es}}](configure-elasticsearch.md). -* Configure [important {{es}} settings](important-settings-configuration.md). -* Configure [important system settings](important-system-configuration.md). 
+:::{include} _snippets/install-next-steps.md +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index dab8ea2e55..87cbbd85f2 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -1,42 +1,39 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/deb.html +sub: + es-conf: "/etc/elasticsearch" + slash: "/" + distro: "Debian" +navigation_title: Debian --- -# Install Elasticsearch with Debian Package [deb] +# Install {{es}} with a Debian package [deb] -The Debian package for Elasticsearch can be [downloaded from our website](#install-deb) or from our [APT repository](#deb-repo). It can be used to install Elasticsearch on any Debian-based system such as Debian and Ubuntu. +The Debian package for {{es}} can be [downloaded from our website](#install-deb) or from our [APT repository](#deb-repo). It can be used to install {{es}} on any Debian-based system such as Debian and Ubuntu. -This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. +:::{include} _snippets/trial.md +::: -The latest stable version of Elasticsearch can be found on the [Download Elasticsearch](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +:::{include} _snippets/other-versions.md +::: ::::{note} -Elasticsearch includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) +{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) :::: -## Import the Elasticsearch PGP Key [deb-key] +## Import the {{es}} PGP key [deb-key] -We sign all of our packages with the Elasticsearch Signing Key (PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), available from [https://pgp.mit.edu](https://pgp.mit.edu)) with fingerprint: - -``` -4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4 -``` -Download and install the public signing key: +:::{include} _snippets/pgp-key.md +::: ```sh wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg ``` - ## Installing from the APT repository [deb-repo] -::::{warning} -Version 9.0.0-beta1 of Elasticsearch has not yet been released. -:::: - - You may need to install the `apt-transport-https` package on Debian before proceeding: ```sh @@ -45,270 +42,99 @@ sudo apt-get install apt-transport-https Save the repository definition to `/etc/apt/sources.list.d/elastic-9.x.list`: -::::{note} -On systemd-based distributions, the installation scripts will attempt to set kernel parameters (e.g., `vm.max_map_count`); you can skip this by masking the systemd-sysctl.service unit. -:::: - - - -## Download and install the Debian package manually [install-deb] - -::::{warning} -Version 9.0.0-beta1 of Elasticsearch has not yet been released. The package might not be available. 
-:::: - - -The Debian package for Elasticsearch v9.0.0-beta1 can be downloaded from the website and installed as follows: - -```sh -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-amd64.deb -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-amd64.deb.sha512 -shasum -a 512 -c elasticsearch-9.0.0-beta1-amd64.deb.sha512 <1> -sudo dpkg -i elasticsearch-9.0.0-beta1-amd64.deb -``` - -1. Compares the SHA of the downloaded Debian package and the published checksum, which should output `elasticsearch-{{version}}-amd64.deb: OK`. - - - -## Start {{es}} with security enabled [deb-security-configuration] - -When installing {{es}}, security features are enabled and configured by default. When you install {{es}}, the following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. - -The password and certificate and keys are output to your terminal. You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) command. - -We recommend storing the `elastic` password as an environment variable in your shell. For example: - ```sh -export ELASTIC_PASSWORD="your_password" +echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/9.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-9.x.list ``` -### Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster] +:::{note} +These instructions do not use `add-apt-repository` for several reasons: -When you install {{es}}, the installation process configures a single-node cluster by default.
If you want a node to join an existing cluster instead, generate an enrollment token on an existing node *before* you start the new node for the first time. +1. `add-apt-repository` adds entries to the system `/etc/apt/sources.list` file rather than a clean per-repository file in `/etc/apt/sources.list.d`. +2. `add-apt-repository` is not part of the default install on many distributions and requires a number of non-default dependencies. +3. Older versions of `add-apt-repository` always add a `deb-src` entry which will cause errors because we do not provide a source package. If you have added the `deb-src` entry, you will see an error like the following until you delete the `deb-src` line: -1. On any node in your existing cluster, generate a node enrollment token: - - ```sh - /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node ``` - -2. Copy the enrollment token, which is output to your terminal. -3. On your new {{es}} node, pass the enrollment token as a parameter to the `elasticsearch-reconfigure-node` tool: - - ```sh - /usr/share/elasticsearch/bin/elasticsearch-reconfigure-node --enrollment-token + Unable to find expected entry 'main/source/Sources' in Release file + (Wrong sources.list entry or malformed file) ``` +::: - {{es}} is now configured to join the existing cluster. - -4. [Start your new node using `systemd`](#deb-running-systemd). - - - -## Enable automatic creation of system indices [deb-enable-indices] - -Some commercial features automatically create indices within {{es}}. By default, {{es}} is configured to allow automatic index creation, and no additional steps are required. 
However, if you have disabled automatic index creation in {{es}}, you must configure [`action.auto_create_index`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) in `elasticsearch.yml` to allow the commercial features to create the following indices: - -```yaml -action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml* -``` - -::::{important} -If you are using [Logstash](https://www.elastic.co/products/logstash) or [Beats](https://www.elastic.co/products/beats) then you will most likely require additional index names in your `action.auto_create_index` setting, and the exact value will depend on your local configuration. If you are unsure of the correct value for your environment, you may consider setting the value to `*` which will allow automatic creation of all indices. - -:::: - - - -## Running Elasticsearch with `systemd` [deb-running-systemd] - -To configure Elasticsearch to start automatically when the system boots up, run the following commands: +You can install the {{es}} Debian package with: ```sh -sudo /bin/systemctl daemon-reload -sudo /bin/systemctl enable elasticsearch.service +sudo apt-get update && sudo apt-get install elasticsearch ``` -Elasticsearch can be started and stopped as follows: +:::{warning} +If two entries exist for the same {{es}} repository, you will see an error like this during `apt-get update`: -```sh -sudo systemctl start elasticsearch.service -sudo systemctl stop elasticsearch.service ``` - -These commands provide no feedback as to whether Elasticsearch was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. - -If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once Elasticsearch is up and running. 
- -```sh -echo "keystore_password" > /path/to/my_pwd_file.tmp -chmod 600 /path/to/my_pwd_file.tmp -sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.tmp -sudo systemctl start elasticsearch.service +Duplicate sources.list entry https://artifacts.elastic.co/packages/9.x/apt/ ... ``` -By default the Elasticsearch service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. - -When `systemd` logging is enabled, the logging information are available using the `journalctl` commands: +Examine `/etc/apt/sources.list.d/elastic-9.x.list` for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. +::: -To tail the journal: - -```sh -sudo journalctl -f -``` - -To list journal entries for the elasticsearch service: - -```sh -sudo journalctl --unit elasticsearch -``` +:::{include} _snippets/skip-set-kernel-params.md +::: -To list journal entries for the elasticsearch service starting from a given time: - -```sh -sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" -``` - -Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.md) for more command line options. - -::::{admonition} Startup timeouts with older `systemd` versions -:class: tip - -By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. - -Versions of `systemd` prior to 238 do not support the timeout extension mechanism and will terminate the {{es}} process if it has not fully started up within the configured timeout.
If this happens, {{es}} will report in its logs that it was shut down normally a short time after it started: - -```text -[2022-01-31T01:22:31,077][INFO ][o.e.n.Node ] [instance-0000000123] starting ... -... -[2022-01-31T01:37:15,077][INFO ][o.e.n.Node ] [instance-0000000123] stopping ... -``` - -However the `systemd` logs will report that the startup timed out: - -```text -Jan 31 01:22:30 debian systemd[1]: Starting Elasticsearch... -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. -Jan 31 01:37:15 debian systemd[1]: Failed to start Elasticsearch. -``` - -To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. - -:::: - - - -## Check that Elasticsearch is running [deb-check-running] +## Download and install the Debian package manually [install-deb] -You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: +The Debian package for {{es}} {{stack-version}} can be downloaded from the website and installed as follows: ```sh -curl --cacert /etc/elasticsearch/certs/http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> -``` - -1. Ensure that you use `https` in your call, or the request will fail.`--cacert` -: Path to the generated `http_ca.crt` certificate for the HTTP layer. 
- - - -The call returns a response like this: - -```js -{ - "name" : "Cp8oag6", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", - "version" : { - "number" : "9.0.0-SNAPSHOT", - "build_type" : "tar", - "build_hash" : "f27399d", - "build_flavor" : "default", - "build_date" : "2016-03-30T09:51:41.449Z", - "build_snapshot" : false, - "lucene_version" : "10.0.0", - "minimum_wire_compatibility_version" : "1.2.3", - "minimum_index_compatibility_version" : "1.2.3" - }, - "tagline" : "You Know, for Search" -} +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-amd64.deb +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-amd64.deb.sha512 +shasum -a 512 -c elasticsearch-{{stack-version}}-amd64.deb.sha512 <1> +sudo dpkg -i elasticsearch-{{stack-version}}-amd64.deb ``` +1. Compares the SHA of the downloaded Debian package and the published checksum, which should output `elasticsearch-{{version}}-amd64.deb: OK`. -## Configuring Elasticsearch [deb-configuring] +## Start {{es}} with security enabled [deb-security-configuration] -The `/etc/elasticsearch` directory contains the default runtime configuration for {{es}}. The ownership of this directory and all contained files are set to `root:elasticsearch` on package installations. +:::{include} _snippets/start-security-enabled.md +::: -The `setgid` flag applies group permissions on the `/etc/elasticsearch` directory to ensure that {{es}} can read any contained files and subdirectories. All files and subdirectories inherit the `root:elasticsearch` ownership. Running commands from this directory or any subdirectories, such as the [elasticsearch-keystore tool](../../security/secure-settings.md), requires `root:elasticsearch` permissions. 
+### Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster] -Elasticsearch loads its configuration from the `/etc/elasticsearch/elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). +:::{include} _snippets/join-existing-cluster.md +::: -The Debian package also has a system configuration file (`/etc/default/elasticsearch`), which allows you to set the following parameters: +## Enable automatic creation of system indices [deb-enable-indices] -`ES_JAVA_HOME` -: Set a custom Java path to be used. +:::{include} _snippets/enable-auto-indices.md +::: -`ES_PATH_CONF` -: Configuration file directory (which needs to include `elasticsearch.yml`, `jvm.options`, and `log4j2.properties` files); defaults to `/etc/elasticsearch`. +## Running {{es}} with `systemd` [running-systemd] -`ES_JAVA_OPTS` -: Any additional JVM system properties you may want to apply. +:::{include} _snippets/systemd.md +::: -`RESTART_ON_UPGRADE` -: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your Elasticsearch instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster. +## Check that {{es}} is running [deb-check-running] -::::{note} -Distributions that use `systemd` require that system resource limits be configured via `systemd` rather than via the `/etc/sysconfig/elasticsearch` file. See [Systemd configuration](setting-system-settings.md#systemd) for more information. -:::: +:::{include} _snippets/check-es-running.md +::: +## Configuring {{es}} [deb-configuring] +:::{include} _snippets/etc-elasticsearch.md +::: ## Connect clients to {{es}} [_connect_clients_to_es_3] -When you start {{es}} for the first time, TLS is configured automatically for the HTTP layer. 
A CA certificate is generated and stored on disk at: - -```sh -/etc/elasticsearch/certs/http_ca.crt -``` - -The hex-encoded SHA-256 fingerprint of this certificate is also output to the terminal. Any clients that connect to {{es}}, such as the [{{es}} Clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html), {{beats}}, standalone {{agent}}s, and {{ls}} must validate that they trust the certificate that {{es}} uses for HTTPS. {{fleet-server}} and {{fleet}}-managed {{agent}}s are automatically configured to trust the CA certificate. Other clients can establish trust by using either the fingerprint of the CA certificate or the CA certificate itself. +:::{include} _snippets/connect-clients.md +::: -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. +### Use the CA fingerprint [_use_the_ca_fingerprint_2] +:::{include} _snippets/ca-fingerprint.md +::: -#### Use the CA fingerprint [_use_the_ca_fingerprint_3] - -Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. - -```sh -openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt -``` - -The command returns the security certificate, including the fingerprint. The `issuer` should be `Elasticsearch security auto-configuration HTTP CA`. 
- -```sh -issuer= /CN=Elasticsearch security auto-configuration HTTP CA -SHA256 Fingerprint= -``` - - -#### Use the CA certificate [_use_the_ca_certificate_3] - -If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: - -```sh -/etc/elasticsearch/certs/http_ca.crt -``` - -Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. +### Use the CA certificate [_use_the_ca_certificate_2] +:::{include} _snippets/ca-cert.md +::: ## Directory layout of Debian package [deb-layout] @@ -316,50 +142,23 @@ The Debian package places config files, logs, and the data directory in the appr | Type | Description | Default Location | Setting | | --- | --- | --- | --- | -| home | Elasticsearch home directory or `$ES_HOME` | `/usr/share/elasticsearch` | | +| home | {{es}} home directory or `$ES_HOME` | `/usr/share/elasticsearch` | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `/usr/share/elasticsearch/bin` | | | conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | `[ES_PATH_CONF](configure-elasticsearch.md#config-files-location)` | | conf | Environment variables including heap size, file descriptors. | `/etc/default/elasticsearch` | | | conf | Generated TLS keys and certificates for the transport and http layer. | `/etc/elasticsearch/certs` | | | data | The location of the data files of each index / shard allocated on the node. | `/var/lib/elasticsearch` | `path.data` | -| jdk | The bundled Java Development Kit used to run Elasticsearch. Can be overridden by setting the `ES_JAVA_HOME` environment variable in `/etc/default/elasticsearch`. | `/usr/share/elasticsearch/jdk` | | +| jdk | The bundled Java Development Kit used to run {{es}}. 
Can be overridden by setting the `ES_JAVA_HOME` environment variable in `/etc/default/elasticsearch`. | `/usr/share/elasticsearch/jdk` | | | logs | Log files location. | `/var/log/elasticsearch` | `path.logs` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/elasticsearch/plugins` | | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | -### Security certificates and keys [_security_certificates_and_keys_2] - -When you install {{es}}, the following certificates and keys are generated in the {{es}} configuration directory, which are used to connect a {{kib}} instance to your secured {{es}} cluster and to encrypt internode communication. The files are listed here for reference. - -`http_ca.crt` -: The CA certificate that is used to sign the certificates for the HTTP layer of this {{es}} cluster. - -`http.p12` -: Keystore that contains the key and certificate for the HTTP layer for this node. - -`transport.p12` -: Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. - -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. 
- -Use the following command to retrieve the password for `http.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.http.ssl.keystore.secure_password -``` - -Use the following command to retrieve the password for `transport.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.transport.ssl.keystore.secure_password -``` - - +### Security certificates and keys [_security_certificates_and_keys] -## Next steps [_next_steps_3] +:::{include} _snippets/security-files.md +::: -You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: +## Next steps [_next_steps] -* Learn how to [configure Elasticsearch](configure-elasticsearch.md). -* Configure [important Elasticsearch settings](important-settings-configuration.md). -* Configure [important system settings](important-system-configuration.md). +:::{include} _snippets/install-next-steps.md +::: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 7a44020e47..3a323e2aa7 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -1,13 +1,15 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html +navigation_title: "Docker" --- # Install {{es}} with Docker [docker] Docker images for {{es}} are available from the Elastic Docker registry. A list of all published Docker images and tags is available at [www.docker.elastic.co](https://www.docker.elastic.co). The source code is in [GitHub](https://github.com/elastic/elasticsearch/blob/master/distribution/docker). -This package contains both free and subscription features. 
[Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. +:::{include} _snippets/trial.md +::: ::::{tip} If you just want to test {{es}} in local development, refer to [Run {{es}} locally](../../../solutions/search/get-started.md). Please note that this setup is not suitable for production environments. @@ -33,7 +35,7 @@ To use the Wolfi image, append `-wolfi` to the image tag in the Docker command. For example: ```sh -docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 +docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version}} ``` @@ -51,13 +53,8 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 3. Pull the {{es}} Docker image. - ::::{warning} - Version 9.0.0-beta1 has not yet been released. No Docker image is currently available for {{es}} 9.0.0-beta1. - :::: - - ```sh - docker pull docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + docker pull docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` 4. Optional: Install [Cosign](https://docs.sigstore.dev/cosign/system_config/installation/) for your environment. Then use Cosign to verify the {{es}} image’s signature. 
@@ -66,13 +63,13 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 ```sh wget https://artifacts.elastic.co/cosign.pub - cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` The `cosign` command prints the check results and the signature payload in JSON format: ```sh - Verification for docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 -- + Verification for docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -- The following checks were performed on each of these signatures: - The cosign claims were validated - Existence of the claims in the transparency log was verified offline @@ -82,7 +79,7 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 5. Start an {{es}} container. ```sh - docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` ::::{tip} @@ -93,7 +90,7 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 {{ml-cap}} features such as [semantic search with ELSER](/solutions/search/semantic-search/semantic-search-elser-ingest-pipelines.md) require a larger container with more than 1GB of memory. If you intend to use the {{ml}} capabilities, then start the container with this command: ```sh - docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` The command prints the `elastic` user password and an enrollment token for {{kib}}. 
@@ -138,7 +135,7 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 2. Start a new {{es}} container. Include the enrollment token as an environment variable. ```sh - docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` 3. Call the [cat nodes API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) to verify the node was added to the cluster. @@ -147,32 +144,25 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cat/nodes ``` - - ### Run {{kib}} [run-kibana-docker] 1. Pull the {{kib}} Docker image. - ::::{warning} - Version 9.0.0-beta1 has not yet been released. No Docker image is currently available for {{kib}} 9.0.0-beta1. - :::: - - ```sh - docker pull docker.elastic.co/kibana/kibana:9.0.0-beta1 + docker pull docker.elastic.co/kibana/kibana:{{stack-version}} ``` 2. Optional: Verify the {{kib}} image’s signature. ```sh wget https://artifacts.elastic.co/cosign.pub - cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:9.0.0-beta1 + cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:{{stack-version}} ``` 3. Start a {{kib}} container. ```sh - docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:9.0.0-beta1 + docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{{stack-version}} ``` 4. When {{kib}} starts, it outputs a unique generated link to the terminal. To access {{kib}}, open this link in a web browser. @@ -251,7 +241,7 @@ Use Docker Compose to start a three-node {{es}} cluster with {{kib}}. Docker Com ```txt ... # Version of Elastic products - STACK_VERSION=9.0.0-beta1 + STACK_VERSION={{stack-version}} ... 
``` @@ -424,7 +414,7 @@ Increased ulimits for [nofile](setting-system-settings.md) and [nproc](max-numbe To check the Docker daemon defaults for ulimits, run: ```sh -docker run --rm docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' +docker run --rm docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' ``` If needed, adjust them in the Daemon or override them per container. For example, when using `docker run`, set: @@ -459,7 +449,7 @@ To manually set the heap size in production, bind mount a [JVM options](asciidoc For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` environment variable. For example, to use 1GB, use the following command. ```sh -docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 +docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in production. @@ -467,7 +457,7 @@ The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend ### Pin deployments to a specific image version [_pin_deployments_to_a_specific_image_version] -Pin your deployments to a specific version of the {{es}} Docker image. For example `docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1`. +Pin your deployments to a specific version of the {{es}} Docker image. For example `docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}}`. 
### Always bind data volumes [_always_bind_data_volumes] @@ -551,7 +541,7 @@ For example: ```sh docker run -it --rm \ -v full_path_to/config:/usr/share/elasticsearch/config \ -docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 \ +docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ bin/elasticsearch-keystore create -p ``` @@ -560,7 +550,7 @@ You can also use a `docker run` command to add or update secure settings in the ```sh docker run -it --rm \ -v full_path_to/config:/usr/share/elasticsearch/config \ -docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 \ +docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ bin/elasticsearch-keystore \ add my.secure.setting \ my.other.secure.setting @@ -579,7 +569,7 @@ If you’ve already created the keystore and don’t need to update it, you can In some environments, it might make more sense to prepare a custom image that contains your configuration. A `Dockerfile` to achieve this might be as simple as: ```sh -FROM docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 +FROM docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/ ``` diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index f55fc88d1d..20c4765934 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -1,318 +1,108 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/rpm.html +sub: + es-conf: "/etc/elasticsearch" + slash: "/" + distro: "RPM" +navigation_title: "RPM" --- # Install {{es}} with RPM [rpm] -The RPM for {{es}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). 
It can be used to install {{es}} on any RPM-based system such as OpenSuSE, SLES, Centos, Red Hat, and Oracle Enterprise. +The RPM package for {{es}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install {{es}} on any RPM-based system such as OpenSuSE, SLES, CentOS, Red Hat, and Oracle Enterprise. ::::{note} RPM install is not supported on distributions with old versions of RPM, such as SLES 11 and CentOS 5. Please see [Install {{es}} from archive on Linux or MacOS](install-elasticsearch-from-archive-on-linux-macos.md) instead. :::: +:::{include} _snippets/trial.md +::: -This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. - -The latest stable version of {{es}} can be found on the [Download {{es}}](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +:::{include} _snippets/other-versions.md +::: ::::{note} {{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) :::: - ::::{tip} For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](installing-elasticsearch.md).
:::: +## Import the {{es}} PGP key [rpm-key] -## Import the {{es}} GPG Key [rpm-key] - -We sign all of our packages with the {{es}} Signing Key (PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), available from [https://pgp.mit.edu](https://pgp.mit.edu)) with fingerprint: - -``` -4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4 -``` -Download and install the public signing key: +:::{include} _snippets/pgp-key.md +::: ```sh rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch ``` - ## Installing from the RPM repository [rpm-repo] -::::{warning} -Version 9.0.0-beta1 of {{es}} has not yet been released. -:::: - - Create a file called `elasticsearch.repo` in the `/etc/yum.repos.d/` directory for RedHat based distributions, or in the `/etc/zypp/repos.d/` directory for OpenSuSE based distributions, containing: - ## Download and install the RPM manually [install-rpm] -::::{warning} -Version 9.0.0-beta1 of {{es}} has not yet been released. The RPM might not be available. -:::: - - -The RPM for {{es}} v9.0.0-beta1 can be downloaded from the website and installed as follows: +The RPM for {{es}} {{stack-version}} can be downloaded from the website and installed as follows: ```sh -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-x86_64.rpm -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-x86_64.rpm.sha512 -shasum -a 512 -c elasticsearch-9.0.0-beta1-x86_64.rpm.sha512 <1> -sudo rpm --install elasticsearch-9.0.0-beta1-x86_64.rpm +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-x86_64.rpm +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-x86_64.rpm.sha512 +shasum -a 512 -c elasticsearch-{{stack-version}}-x86_64.rpm.sha512 <1> +sudo rpm --install elasticsearch-{{stack-version}}-x86_64.rpm ``` 1. 
Compares the SHA of the downloaded RPM and the published checksum, which should output `elasticsearch-{{version}}-x86_64.rpm: OK`. - -::::{note} -On systemd-based distributions, the installation scripts will attempt to set kernel parameters (e.g., `vm.max_map_count`); you can skip this by masking the systemd-sysctl.service unit. -:::: - - +:::{include} _snippets/skip-set-kernel-params.md +::: ## Start {{es}} with security enabled [rpm-security-configuration] -When installing {{es}}, security features are enabled and configured by default. When you install {{es}}, the following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. - -The password and certificate and keys are output to your terminal. You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) command. - -We recommend storing the `elastic` password as an environment variable in your shell. For example: - -```sh -export ELASTIC_PASSWORD="your_password" -``` +:::{include} _snippets/start-security-enabled.md +::: ### Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster_2] -When you install {{es}}, the installation process configures a single-node cluster by default. If you want a node to join an existing cluster instead, generate an enrollment token on an existing node *before* you start the new node for the first time. - -1. On any node in your existing cluster, generate a node enrollment token: - - ```sh - /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node - ``` - -2. Copy the enrollment token, which is output to your terminal. -3. 
On your new {{es}} node, pass the enrollment token as a parameter to the `elasticsearch-reconfigure-node` tool: - - ```sh - /usr/share/elasticsearch/bin/elasticsearch-reconfigure-node --enrollment-token - ``` - - {{es}} is now configured to join the existing cluster. - -4. [Start your new node using `systemd`](#rpm-running-systemd). - - +:::{include} _snippets/join-existing-cluster.md +::: ## Enable automatic creation of system indices [rpm-enable-indices] -Some commercial features automatically create indices within {{es}}. By default, {{es}} is configured to allow automatic index creation, and no additional steps are required. However, if you have disabled automatic index creation in {{es}}, you must configure [`action.auto_create_index`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) in `elasticsearch.yml` to allow the commercial features to create the following indices: - -```yaml -action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml* -``` - -::::{important} -If you are using [Logstash](https://www.elastic.co/products/logstash) or [Beats](https://www.elastic.co/products/beats) then you will most likely require additional index names in your `action.auto_create_index` setting, and the exact value will depend on your local configuration. If you are unsure of the correct value for your environment, you may consider setting the value to `*` which will allow automatic creation of all indices. - -:::: - - - -## Running {{es}} with `systemd` [rpm-running-systemd] - -To configure {{es}} to start automatically when the system boots up, run the following commands: - -```sh -sudo /bin/systemctl daemon-reload -sudo /bin/systemctl enable elasticsearch.service -``` - -{{es}} can be started and stopped as follows: - -```sh -sudo systemctl start elasticsearch.service -sudo systemctl stop elasticsearch.service -``` - -These commands provide no feedback as to whether {{es}} was started successfully or not. 
Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. - -If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once {{es}} is up and running. - -```sh -echo "keystore_password" > /path/to/my_pwd_file.tmp -chmod 600 /path/to/my_pwd_file.tmp -sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.tmp -sudo systemctl start elasticsearch.service -``` - -By default the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. - -When `systemd` logging is enabled, the logging information are available using the `journalctl` commands: - -To tail the journal: - -```sh -sudo journalctl -f -``` - -To list journal entries for the elasticsearch service: - -```sh -sudo journalctl --unit elasticsearch -``` - -To list journal entries for the elasticsearch service starting from a given time: - -```sh -sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" -``` - -Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.md) for more command line options. - -::::{admonition} Startup timeouts with older `systemd` versions -:class: tip - -By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. 
- -Versions of `systemd` prior to 238 do not support the timeout extension mechanism and will terminate the {{es}} process if it has not fully started up within the configured timeout. If this happens, {{es}} will report in its logs that it was shut down normally a short time after it started: - -```text -[2022-01-31T01:22:31,077][INFO ][o.e.n.Node ] [instance-0000000123] starting ... -... -[2022-01-31T01:37:15,077][INFO ][o.e.n.Node ] [instance-0000000123] stopping ... -``` - -However the `systemd` logs will report that the startup timed out: - -```text -Jan 31 01:22:30 debian systemd[1]: Startin... -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. -Jan 31 01:37:15 debian systemd[1]: Failed to start {{es}}. -``` - -To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. - -:::: +:::{include} _snippets/enable-auto-indices.md +::: +## Running {{es}} with `systemd` [running-systemd] +:::{include} _snippets/systemd.md +::: ## Check that {{es}} is running [rpm-check-running] -You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: - -```sh -curl --cacert /etc/elasticsearch/certs/http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> -``` - -1. Ensure that you use `https` in your call, or the request will fail.`--cacert` -: Path to the generated `http_ca.crt` certificate for the HTTP layer. 
- - - -The call returns a response like this: - -```js -{ - "name" : "Cp8oag6", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", - "version" : { - "number" : "9.0.0-SNAPSHOT", - "build_type" : "tar", - "build_hash" : "f27399d", - "build_flavor" : "default", - "build_date" : "2016-03-30T09:51:41.449Z", - "build_snapshot" : false, - "lucene_version" : "10.0.0", - "minimum_wire_compatibility_version" : "1.2.3", - "minimum_index_compatibility_version" : "1.2.3" - }, - "tagline" : "You Know, for Search" -} -``` - +:::{include} _snippets/check-es-running.md +::: ## Configuring {{es}} [rpm-configuring] -The `/etc/elasticsearch` directory contains the default runtime configuration for {{es}}. The ownership of this directory and all contained files are set to `root:elasticsearch` on package installations. - -The `setgid` flag applies group permissions on the `/etc/elasticsearch` directory to ensure that {{es}} can read any contained files and subdirectories. All files and subdirectories inherit the `root:elasticsearch` ownership. Running commands from this directory or any subdirectories, such as the [elasticsearch-keystore tool](../../security/secure-settings.md), requires `root:elasticsearch` permissions. - -{{es}} loads its configuration from the `/etc/elasticsearch/elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). - -The RPM also has a system configuration file (`/etc/sysconfig/elasticsearch`), which allows you to set the following parameters: - -`ES_JAVA_HOME` -: Set a custom Java path to be used. - -`ES_PATH_CONF` -: Configuration file directory (which needs to include `elasticsearch.yml`, `jvm.options`, and `log4j2.properties` files); defaults to `/etc/elasticsearch`. - -`ES_JAVA_OPTS` -: Any additional JVM system properties you may want to apply. - -`RESTART_ON_UPGRADE` -: Configure restart on package upgrade, defaults to `false`. 
This means you will have to restart your {{es}} instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster. - -::::{note} -Distributions that use `systemd` require that system resource limits be configured via `systemd` rather than via the `/etc/sysconfig/elasticsearch` file. See [Systemd configuration](setting-system-settings.md#systemd) for more information. -:::: - - +:::{include} _snippets/etc-elasticsearch.md +::: ## Connect clients to {{es}} [_connect_clients_to_es_4] -When you start {{es}} for the first time, TLS is configured automatically for the HTTP layer. A CA certificate is generated and stored on disk at: - -```sh -/etc/elasticsearch/certs/http_ca.crt -``` - -The hex-encoded SHA-256 fingerprint of this certificate is also output to the terminal. Any clients that connect to {{es}}, such as the [{{es}} Clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html), {{beats}}, standalone {{agent}}s, and {{ls}} must validate that they trust the certificate that {{es}} uses for HTTPS. {{fleet-server}} and {{fleet}}-managed {{agent}}s are automatically configured to trust the CA certificate. Other clients can establish trust by using either the fingerprint of the CA certificate or the CA certificate itself. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. - - -#### Use the CA fingerprint [_use_the_ca_fingerprint_4] +:::{include} _snippets/connect-clients.md +::: -Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. 
- -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. - -```sh -openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt -``` +### Use the CA fingerprint [_use_the_ca_fingerprint_2] -The command returns the security certificate, including the fingerprint. The `issuer` should be `Elasticsearch security auto-configuration HTTP CA`. +:::{include} _snippets/ca-fingerprint.md +::: -```sh -issuer= /CN=Elasticsearch security auto-configuration HTTP CA -SHA256 Fingerprint= -``` - - -#### Use the CA certificate [_use_the_ca_certificate_4] - -If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: - -```sh -/etc/elasticsearch/certs/http_ca.crt -``` - -Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. +### Use the CA certificate [_use_the_ca_certificate_2] +:::{include} _snippets/ca-cert.md +::: ## Directory layout of RPM [rpm-layout] @@ -331,39 +121,12 @@ The RPM places config files, logs, and the data directory in the appropriate loc | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/elasticsearch/plugins` | | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | -### Security certificates and keys [_security_certificates_and_keys_3] - -When you install {{es}}, the following certificates and keys are generated in the {{es}} configuration directory, which are used to connect a {{kib}} instance to your secured {{es}} cluster and to encrypt internode communication. The files are listed here for reference. 
- -`http_ca.crt` -: The CA certificate that is used to sign the certificates for the HTTP layer of this {{es}} cluster. - -`http.p12` -: Keystore that contains the key and certificate for the HTTP layer for this node. - -`transport.p12` -: Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. - -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. - -Use the following command to retrieve the password for `http.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.http.ssl.keystore.secure_password -``` - -Use the following command to retrieve the password for `transport.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.transport.ssl.keystore.secure_password -``` - - +### Security certificates and keys [_security_certificates_and_keys] -## Next steps [_next_steps_4] +:::{include} _snippets/security-files.md +::: -You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: +## Next steps [_next_steps] -* Learn how to [configure {{es}}](configure-elasticsearch.md). -* Configure [important {{es}} settings](important-settings-configuration.md). -* Configure [important system settings](important-system-configuration.md). 
+:::{include} _snippets/install-next-steps.md +::: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 51d3ab684f..17a4dee345 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -4,20 +4,22 @@ mapped_pages: sub: es-conf: "%ES_HOME%\\config" slash: "\\" +navigation_title: Windows --- # Install {{es}} with .zip on Windows [zip-windows] {{es}} can be installed on Windows using the Windows `.zip` archive. This comes with a `elasticsearch-service.bat` command which will setup {{es}} to run as a service. -This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. +:::{include} _snippets/trial.md +::: ::::{note} On Windows the {{es}} {{ml}} feature requires the Microsoft Universal C Runtime library. This is built into Windows 10, Windows Server 2016 and more recent versions of Windows. For older versions of Windows it can be installed via Windows Update, or from a [separate download](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows). If you cannot install the Microsoft Universal C Runtime library you can still use the rest of {{es}} if you disable the {{ml}} feature. :::: - -The latest stable version of {{es}} can be found on the [Download {{es}}](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +:::{include} _snippets/other-versions.md +::: ::::{note} {{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) @@ -26,21 +28,29 @@ The latest stable version of {{es}} can be found on the [Download {{es}}](https: ## Download and install the `.zip` package [install-windows] +% link url manually set +Download the `.zip` archive for {{es}} {{stack-version}} from: [https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-windows-x86_64.zip](https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-windows-x86_64.zip) -Download the `.zip` archive for {{es}} 9.0.0-beta1 from: [https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-windows-x86_64.zip](https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-beta1-windows-x86_64.zip) - -Unzip it with your favorite unzip tool. This will create a folder called `elasticsearch-9.0.0-beta1`, which we will refer to as `%ES_HOME%`. In a terminal window, `cd` to the `%ES_HOME%` directory, for instance: +Unzip it with your favorite unzip tool. This will create a folder called `elasticsearch-{{stack-version}}`, which we will refer to as `%ES_HOME%`. In a terminal window, `cd` to the `%ES_HOME%` directory, for instance: ```sh -cd C:\Program Files\elasticsearch-9.0.0-beta1 +cd C:\Program Files\elasticsearch-{{stack-version}} ``` - ## Enable automatic creation of system indices [windows-enable-indices] :::{include} _snippets/enable-auto-indices.md +::: + +## Run {{es}} from the command line [windows-running] :::{include} _snippets/zip-windows-start.md +::: + +### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] + +:::{include} _snippets/enroll-nodes.md +::: ## Configure {{es}} on the command line [windows-configuring] @@ -61,7 +71,10 @@ Values that contain spaces must be surrounded with quotes. For instance `-Epath. 
Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings such as `node.name` could be specified on the command line. :::: +## Check that {{es}} is running [_check_that_elasticsearch_is_running_2] + :::{include} _snippets/check-es-running.md +::: ## Install and run {{es}} as a service on Windows [windows-service] @@ -70,7 +83,7 @@ You can install {{es}} as a service that runs in the background or starts automa 1. Install {{es}} as a service. The name of the service and the value of `ES_JAVA_HOME` will be made available during install: ```sh - C:\Program Files\elasticsearch-9.0.0-beta1\bin>elasticsearch-service.bat install + C:\Program Files\elasticsearch-{{stack-version}}\bin>elasticsearch-service.bat install Installing service : "elasticsearch-service-x64" Using ES_JAVA_HOME (64-bit): "C:\jvm\jdk1.8" The service 'elasticsearch-service-x64' has been installed. @@ -79,7 +92,7 @@ You can install {{es}} as a service that runs in the background or starts automa 2. Start {{es}} as a service. When {{es}} starts, authentication is enabled by default: ```sh - C:\Program Files\elasticsearch-9.0.0-beta1\bin>bin\elasticsearch-service.bat start + C:\Program Files\elasticsearch-{{stack-version}}\bin>elasticsearch-service.bat start ``` ::::{note} @@ -89,7 +102,7 @@ You can install {{es}} as a service that runs in the background or starts automa 3. Generate a password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. The password is output to the command line.
```sh - C:\Program Files\elasticsearch-9.0.0-beta1\bin>\bin\elasticsearch-reset-password -u elastic + C:\Program Files\elasticsearch-{{stack-version}}\bin>elasticsearch-reset-password -u elastic ``` @@ -108,7 +121,7 @@ The system environment variable `ES_JAVA_HOME` should be set to the path of the Run the `elasticsearch-service.bat` script in the `bin\` folder to install, remove, manage, or configure the service and potentially start and stop the service from the command line. ```sh -C:\Program Files\elasticsearch-9.0.0-beta1\bin>elasticsearch-service.bat +C:\Program Files\elasticsearch-{{stack-version}}\bin>elasticsearch-service.bat Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] ``` @@ -147,10 +160,10 @@ The {{es}} service can be configured prior to installation by setting the follow : The password for the user specified in `%SERVICE_USERNAME%`. `SERVICE_DISPLAY_NAME` -: The name of the service. Defaults to `Elasticsearch %SERVICE_ID%`. +: The name of the service. Defaults to `Elasticsearch %SERVICE_ID%`. `SERVICE_DESCRIPTION` -: The description of the service. Defaults to `Elasticsearch Windows Service - https://elastic.co`. +: The description of the service. Defaults to `Elasticsearch Windows Service - https://elastic.co`. `ES_JAVA_HOME` : The installation directory of the desired JVM to run the service under. @@ -193,9 +206,21 @@ Using the Manager GUI Most changes (like JVM settings) made through the manager GUI will require a restart of the service to take affect. +## Connect clients to {{es}} [_connect_clients_to_es_4] + :::{include} _snippets/connect-clients.md ::: +### Use the CA fingerprint [_use_the_ca_fingerprint_2] + +:::{include} _snippets/ca-fingerprint.md +::: + +### Use the CA certificate [_use_the_ca_certificate_2] + +:::{include} _snippets/ca-cert.md +::: + ## Directory layout of `.zip` archive [windows-layout] The `.zip` package is entirely self-contained.
All files and directories are, by default, contained within `%ES_HOME%` — the directory created when unpacking the archive. @@ -213,5 +238,9 @@ This is very convenient because you don’t have to create any directories to st | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `%ES_HOME%\plugins` | | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | + + +## Next steps [_next_steps] + :::{include} _snippets/install-next-steps.md ::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md index dc8991e991..72e43e9b41 100644 --- a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md @@ -9,11 +9,11 @@ mapped_pages: # Install from archive on Linux or macOS [targz] -Kibana is provided for Linux and Darwin as a `.tar.gz` package. These packages are the easiest formats to use when trying out Kibana. +{{kib}} is provided for Linux and Darwin as a `.tar.gz` package. These packages are the easiest formats to use when trying out Kibana. This package contains both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. -The latest stable version of Kibana can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). 
::::{note} macOS is supported for development purposes only and is not covered under the support SLA for [production-supported operating systems](https://www.elastic.co/support/matrix#kibana). @@ -23,15 +23,24 @@ macOS is supported for development purposes only and is not covered under the su ## Download and install the Linux 64-bit package [install-linux64] -Version 9.0.0-beta1 of Kibana has not yet been released. +The Linux archive for {{kib}} {{stack-version}} can be downloaded and installed as follows: +```sh +curl -O https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-linux-x86_64.tar.gz +curl https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-linux-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> +tar -xzf kibana-{{stack-version}}-linux-x86_64.tar.gz +cd kibana-{{stack-version}}/ <2> +``` + +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana-{{stack-version}}-linux-x86_64.tar.gz: OK`. +2. This directory is known as `$KIBANA_HOME`. ## Download and install the Darwin package [install-darwin64] ::::{admonition} macOS Gatekeeper warnings :class: important -Apple’s rollout of stricter notarization requirements affected the notarization of the 9.0.0-beta1 {{kib}} artifacts. If macOS displays a dialog when you first run {{kib}} that interrupts it, you will need to take an action to allow it to run. +Apple’s rollout of stricter notarization requirements affected the notarization of the {{stack-version}} {{kib}} artifacts. If macOS displays a dialog when you first run {{kib}} that interrupts it, you will need to take an action to allow it to run. 
To prevent Gatekeeper checks on the {{kib}} files, run the following command on the downloaded `.tar.gz` archive or the directory to which was extracted: @@ -43,9 +52,17 @@ Alternatively, you can add a security override if a Gatekeeper popup appears by :::: +The Darwin archive for {{kib}} {{stack-version}} can be downloaded and installed as follows: -Version 9.0.0-beta1 of Kibana has not yet been released. +```sh +curl -O https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz +curl https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> +tar -xzf kibana-{{stack-version}}-darwin-x86_64.tar.gz +cd kibana-{{stack-version}}/ <2> +``` +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana-{{stack-version}}-darwin-x86_64.tar.gz: OK`. +2. This directory is known as `$KIBANA_HOME`. ## Start {{es}} and generate an enrollment token for {{kib}} [targz-enroll] @@ -62,13 +79,13 @@ You can then start {{kib}} and enter the enrollment token to securely connect {{ ## Run {{kib}} from the command line [targz-running] -Kibana can be started from the command line as follows: +{{kib}} can be started from the command line as follows: ```sh ./bin/kibana ``` -By default, Kibana runs in the foreground, prints its logs to the standard output (`stdout`), and can be stopped by pressing **Ctrl-C**. +By default, {{kib}} runs in the foreground, prints its logs to the standard output (`stdout`), and can be stopped by pressing **Ctrl-C**. If this is the first time you’re starting {{kib}}, this command generates a unique link in your terminal to enroll your {{kib}} instance with {{es}}. @@ -85,19 +102,19 @@ If you need to reset the password for the `elastic` user or other built-in users ## Configure {{kib}} via the config file [targz-configuring] -Kibana loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. 
The format of this config file is explained in [Configuring Kibana](configure.md). +{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). ## Directory layout of `.tar.gz` archives [targz-layout] The `.tar.gz` packages are entirely self-contained. All files and directories are, by default, contained within `$KIBANA_HOME` — the directory created when unpacking the archive. -This is very convenient because you don’t have to create any directories to start using Kibana, and uninstalling Kibana is as easy as removing the `$KIBANA_HOME` directory. However, it is advisable to change the default locations of the config and data directories so that you do not delete important data later on. +This is very convenient because you don’t have to create any directories to start using {{kib}}, and uninstalling {{kib}} is as easy as removing the `$KIBANA_HOME` directory. However, it is advisable to change the default locations of the config and data directories so that you do not delete important data later on.
| Type | Description | Default Location | Setting | | --- | --- | --- | --- | -| home | Kibana home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | -| bin | Binary scripts including `kibana` to start the Kibana server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | +| home | {{kib}} home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | +| bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | | config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | -| data | The location of the data files written to disk by Kibana and its plugins | `$KIBANA_HOME\data` | | +| data | The location of the data files written to disk by {{kib}} and its plugins | `$KIBANA_HOME\data` | | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `$KIBANA_HOME\plugins` | | diff --git a/deploy-manage/deploy/self-managed/install-on-windows.md b/deploy-manage/deploy/self-managed/install-on-windows.md index cd2e52e71f..458f64256a 100644 --- a/deploy-manage/deploy/self-managed/install-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-on-windows.md @@ -8,17 +8,21 @@ mapped_pages: # Install on Windows [windows] - -Kibana can be installed on Windows using the `.zip` package. +{{kib}} can be installed on Windows using the `.zip` package. This package contains both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. -The latest stable version of Kibana can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. 
Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). ## Download and install the `.zip` package [install-windows] -Version 9.0.0-beta1 of Kibana has not yet been released. +Download the `.zip` Windows archive for {{kib}} {{stack-version}} from [https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-windows-x86_64.zip](https://artifacts.elastic.co/downloads/kibana/kibana-9.0.0-windows-x86_64.zip) + +Unzip it with your favorite unzip tool. This will create a folder called `kibana-{{stack-version}}-windows-x86_64`, which we will refer to as `$KIBANA_HOME`. In a terminal window, `cd` to the `$KIBANA_HOME` directory, for instance: +```sh +cd c:\kibana-{{stack-version}}-windows-x86_64 +``` ## Start {{es}} and generate an enrollment token for {{kib}} [windows-enroll] @@ -35,13 +39,13 @@ You can then start {{kib}} and enter the enrollment token to securely connect {{ ## Run {{kib}} from the command line [windows-running] -Kibana can be started from the command line as follows: +{{kib}} can be started from the command line as follows: ```sh .\bin\kibana.bat ``` -By default, Kibana runs in the foreground, prints its logs to `STDOUT`, and can be stopped by pressing **Ctrl-C**. +By default, {{kib}} runs in the foreground, prints its logs to `STDOUT`, and can be stopped by pressing **Ctrl-C**. If this is the first time you’re starting {{kib}}, this command generates a unique link in your terminal to enroll your {{kib}} instance with {{es}}. @@ -58,19 +62,19 @@ If you need to reset the password for the `elastic` user or other built-in users ## Configure {{kib}} via the config file [windows-configuring] -Kibana loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. +{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default.
The format of this config file is explained in [Configuring Kibana](configure.md). ## Directory layout of `.zip` archive [windows-layout] The `.zip` package is entirely self-contained. All files and directories are, by default, contained within `$KIBANA_HOME` — the directory created when unpacking the archive. -This is very convenient because you don’t have to create any directories to start using Kibana, and uninstalling Kibana is as easy as removing the `$KIBANA_HOME` directory. However, it is advisable to change the default locations of the config and data directories so that you do not delete important data later on. +This is very convenient because you don’t have to create any directories to start using Kibana, and uninstalling {{kib}} is as easy as removing the `$KIBANA_HOME` directory. However, it is advisable to change the default locations of the config and data directories so that you do not delete important data later on. | Type | Description | Default Location | Setting | | --- | --- | --- | --- | -| home | Kibana home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | -| bin | Binary scripts including `kibana` to start the Kibana server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | +| home | {{kib}} home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | +| bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | | config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | -| | data | `The location of the data files written to disk by Kibana and its plugins` | `$KIBANA_HOME\data` | +| | data | `The location of the data files written to disk by {{kib}} and its plugins` | `$KIBANA_HOME\data` | | | plugins | `Plugin files location. 
Each plugin will be contained in a subdirectory.` | `$KIBANA_HOME\plugins` | diff --git a/deploy-manage/deploy/self-managed/install-with-debian-package.md b/deploy-manage/deploy/self-managed/install-with-debian-package.md index 0316337fa7..24bc50babf 100644 --- a/deploy-manage/deploy/self-managed/install-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-with-debian-package.md @@ -9,39 +9,77 @@ mapped_pages: # Install with Debian package [deb] -The Debian package for Kibana can be [downloaded from our website](#install-deb) or from our [APT repository](#deb-repo). It can be used to install Kibana on any Debian-based system such as Debian and Ubuntu. +The Debian package for {{kib}} can be [downloaded from our website](#install-deb) or from our [APT repository](#deb-repo). It can be used to install {{kib}} on any Debian-based system such as Debian and Ubuntu. This package contains both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. -The latest stable version of Kibana can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). 
## Import the Elastic PGP key [deb-key] -We sign all of our packages with the Elastic Signing Key (PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), available from [https://pgp.mit.edu](https://pgp.mit.edu)) with fingerprint: +:::{include} _snippets/pgp-key.md +::: +```sh +wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg ``` -4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4 + +## Install from the APT repository [deb-repo] + +You may need to install the `apt-transport-https` package on Debian before proceeding: + +```sh +sudo apt-get install apt-transport-https ``` -Download and install the public signing key: + +Save the repository definition to `/etc/apt/sources.list.d/elastic-9.x.list`: ```sh -wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg +echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/9.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-9.x.list ``` +:::{warning} +Do not use `add-apt-repository` as it will add a `deb-src` entry as well, but we do not provide a source package. If you have added the `deb-src` entry, you will see an error like the following: -## Install from the APT repository [deb-repo] + +``` +Unable to find expected entry 'main/source/Sources' in Release file +(Wrong sources.list entry or malformed file) ``` -Version 9.0.0-beta1 of Kibana has not yet been released. +Delete the `deb-src` entry from the `/etc/apt/sources.list` file and the installation should work as expected. 
+::: +You can install the {{kib}} Debian package with: + +```sh +sudo apt-get update && sudo apt-get install kibana +``` + +:::{warning} +If two entries exist for the same {{kib}} repository, you will see an error like this during `apt-get update`: + +``` +Duplicate sources.list entry https://artifacts.elastic.co/packages/9.x/apt/ ...` +``` + +Examine `/etc/apt/sources.list.d/elastic-9.x.list` for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. +::: ## Download and install the Debian package manually [install-deb] -Version 9.0.0-beta1 of Kibana has not yet been released. +The Debian package for {{kib}} {{stack-version}} can be downloaded from the website and installed as follows: +```sh +wget https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-amd64.deb +shasum -a 512 kibana-{{stack-version}}-amd64.deb <1> +sudo dpkg -i kibana-{{stack-version}}-amd64.deb +``` + +1. Compare the SHA produced by shasum with the [published SHA](https://artifacts.elastic.co/downloads/kibana/kibana-9.0.0-amd64.deb.sha512). +% version manually specified in the link above ## Start {{es}} and generate an enrollment token for {{kib}} [deb-enroll] - When you start {{es}} for the first time, the following security configuration occurs automatically: * Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. @@ -79,7 +117,7 @@ These commands provide no feedback as to whether {{kib}} was started successfull ## Configure {{kib}} via the config file [deb-configuring] -Kibana loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). +{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). 
## Directory layout of Debian package [deb-layout] @@ -88,9 +126,9 @@ The Debian package places config files, logs, and the data directory in the appr | Type | Description | Default Location | Setting | | --- | --- | --- | --- | -| home | Kibana home directory or `$KIBANA_HOME` | `/usr/share/kibana` | | -| bin | Binary scripts including `kibana` to start the Kibana server and `kibana-plugin` to install plugins | `/usr/share/kibana/bin` | | +| home | {{kib}} home directory or `$KIBANA_HOME` | `/usr/share/kibana` | | +| bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `/usr/share/kibana/bin` | | | config | Configuration files including `kibana.yml` | `/etc/kibana` | `[KBN_PATH_CONF](configure.md)` | -| data | The location of the data files written to disk by Kibana and its plugins | `/var/lib/kibana` | `path.data` | +| data | The location of the data files written to disk by {{kib}} and its plugins | `/var/lib/kibana` | `path.data` | | logs | Logs files location | `/var/log/kibana` | `[Logging configuration](../../monitor/logging-configuration/kibana-logging.md)` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/kibana/plugins` | | diff --git a/deploy-manage/deploy/self-managed/install-with-docker.md b/deploy-manage/deploy/self-managed/install-with-docker.md index 628f8357e2..ca2b81dc09 100644 --- a/deploy-manage/deploy/self-managed/install-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-with-docker.md @@ -34,7 +34,7 @@ To use the Wolfi image, append `-wolfi` to the image tag in the Docker command. For example: ```sh -docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 +docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version}} ``` @@ -54,26 +54,21 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 3. Pull the {{es}} Docker image. 
- ::::{warning} - Version 9.0.0-beta1 has not yet been released. No Docker image is currently available for {{es}} 9.0.0-beta1. - :::: - - ```sh - docker pull docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + docker pull docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` 4. Optional: Install [Cosign](https://docs.sigstore.dev/system_config/installation/) for your environment. Then use Cosign to verify the {{es}} image’s signature. ```sh wget https://artifacts.elastic.co/cosign.pub - cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` The `cosign` command prints the check results and the signature payload in JSON format: ```sh - Verification for docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 -- + Verification for docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -- The following checks were performed on each of these signatures: - The cosign claims were validated - Existence of the claims in the transparency log was verified offline @@ -83,7 +78,7 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 5. Start an {{es}} container. ```sh - docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:9.0.0-beta1 + docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` ::::{tip} @@ -102,26 +97,21 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:9.0.0-beta1 7. Pull the {{kib}} Docker image. - ::::{warning} - Version 9.0.0-beta1 has not yet been released. No Docker image is currently available for {{kib}} 9.0.0-beta1. - :::: - - ```sh - docker pull docker.elastic.co/kibana/kibana:9.0.0-beta1 + docker pull docker.elastic.co/kibana/kibana:{{stack-version}} ``` 8. Optional: Verify the {{kib}} image’s signature. 
```sh wget https://artifacts.elastic.co/cosign.pub - cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:9.0.0-beta1 + cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:{{stack-version}} ``` 9. Start a {{kib}} container. ```sh - docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:9.0.0-beta1 + docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{{stack-version}} ``` 10. When {{kib}} starts, it outputs a unique generated link to the terminal. To access {{kib}}, open this link in a web browser. @@ -172,7 +162,7 @@ One way to configure {{kib}} on Docker is to provide `kibana.yml` via bind-mount version: '2' services: kibana: - image: docker.elastic.co/kibana/kibana:9.0.0-beta1 + image: docker.elastic.co/kibana/kibana:{{stack-version}} volumes: - ./kibana.yml:/usr/share/kibana/config/kibana.yml ``` @@ -183,14 +173,14 @@ services: By default, {{kib}} auto-generates a keystore file for secure settings at startup. To persist your [secure settings](../../security/secure-settings.md), use the `kibana-keystore` utility to bind-mount the parent directory of the keystore to the container. 
For example: ```sh -docker run -it --rm -v full_path_to/config:/usr/share/kibana/config -v full_path_to/data:/usr/share/kibana/data docker.elastic.co/kibana/kibana:9.0.0-beta1 bin/kibana-keystore create -docker run -it --rm -v full_path_to/config:/usr/share/kibana/config -v full_path_to/data:/usr/share/kibana/data docker.elastic.co/kibana/kibana:9.0.0-beta1 bin/kibana-keystore add test_keystore_setting +docker run -it --rm -v full_path_to/config:/usr/share/kibana/config -v full_path_to/data:/usr/share/kibana/data docker.elastic.co/kibana/kibana:{{stack-version}} bin/kibana-keystore create +docker run -it --rm -v full_path_to/config:/usr/share/kibana/config -v full_path_to/data:/usr/share/kibana/data docker.elastic.co/kibana/kibana:{{stack-version}} bin/kibana-keystore add test_keystore_setting ``` ### Environment variable configuration [environment-variable-config] -Under Docker, {{kib}} can be configured via environment variables. When the container starts, a helper process checks the environment for variables that can be mapped to Kibana command-line arguments. +Under Docker, {{kib}} can be configured via environment variables. When the container starts, a helper process checks the environment for variables that can be mapped to {{kib}} command-line arguments. For compatibility with container orchestration systems, these environment variables are written in all capitals, with underscores as word separators. The helper translates these names to valid {{kib}} setting names. 
@@ -202,7 +192,7 @@ All information that you include in environment variables is visible through the Some example translations are shown here: **Environment Variable** -: **Kibana Setting** +: **{{kib}} Setting** `SERVER_NAME` : `server.name` @@ -223,7 +213,7 @@ These variables can be set with `docker-compose` like this: version: '2' services: kibana: - image: docker.elastic.co/kibana/kibana:9.0.0-beta1 + image: docker.elastic.co/kibana/kibana:{{stack-version}} environment: SERVER_NAME: kibana.example.org ELASTICSEARCH_HOSTS: '["http://es01:9200","http://es02:9200","http://es03:9200"]' diff --git a/deploy-manage/deploy/self-managed/install-with-rpm.md b/deploy-manage/deploy/self-managed/install-with-rpm.md index ce62ded7e8..c05e6133f7 100644 --- a/deploy-manage/deploy/self-managed/install-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-with-rpm.md @@ -9,7 +9,7 @@ mapped_pages: # Install with RPM [rpm] -The RPM for Kibana can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install Kibana on any RPM-based system such as OpenSuSE, SLES, Red Hat, and Oracle Enterprise. +The RPM for {{kib}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install {{kib}} on any RPM-based system such as OpenSuSE, SLES, Red Hat, and Oracle Enterprise. ::::{note} RPM install is not supported on distributions with old versions of RPM, such as SLES 11. Refer to [Install from archive on Linux or macOS](install-from-archive-on-linux-macos.md) instead. @@ -18,7 +18,7 @@ RPM install is not supported on distributions with old versions of RPM, such as This package contains both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. -The latest stable version of Kibana can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. 
Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). ::::{tip} For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](installing-elasticsearch.md). @@ -27,12 +27,8 @@ For a step-by-step example of setting up the {{stack}} on your own premises, try ## Import the Elastic PGP key [rpm-key] -We sign all of our packages with the Elastic Signing Key (PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), available from [https://pgp.mit.edu](https://pgp.mit.edu)) with fingerprint: - -``` -4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4 -``` -Download and install the public signing key: +:::{include} _snippets/pgp-key.md +::: ```sh rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch @@ -41,12 +37,44 @@ rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch ## Installing from the RPM repository [rpm-repo] -Version 9.0.0-beta1 of Kibana has not yet been released. +Create a file called `kibana.repo` in the `/etc/yum.repos.d/` directory for RedHat based distributions, or in the `/etc/zypp/repos.d/` directory for OpenSuSE based distributions, containing: + +```sh +[kibana-9.X] +name={{kib}} repository for 9.x packages +baseurl=https://artifacts.elastic.co/packages/9.x/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +autorefresh=1 +type=rpm-md +``` + +And your repository is ready for use. You can now install {{kib}} with one of the following commands: + +```sh +sudo yum install kibana <1> +sudo dnf install kibana <2> +sudo zypper install kibana <3> +``` + +1. Use yum on older Red Hat based distributions. +2. 
Use dnf on Fedora and other newer Red Hat distributions. +3. Use zypper on OpenSUSE based distributions ## Download and install the RPM manually [install-rpm] -Version 9.0.0-beta1 of Kibana has not yet been released. +The RPM for {{kib}} {{stack-version}} can be downloaded from the website and installed as follows: + +```sh +wget https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-x86_64.rpm +wget https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-x86_64.rpm.sha512 +shasum -a 512 -c kibana-{{stack-version}}-x86_64.rpm.sha512 <1> +sudo rpm --install kibana-{{stack-version}}-x86_64.rpm +``` + +1. Compares the SHA of the downloaded RPM and the published checksum, which should output `kibana-{version}-x86_64.rpm: OK`. ## Start {{es}} and generate an enrollment token for {{kib}} [rpm-enroll] @@ -89,7 +117,7 @@ These commands provide no feedback as to whether {{kib}} was started successfull ## Configure {{kib}} via the config file [rpm-configuring] -Kibana loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). +{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). 
## Directory layout of RPM [rpm-layout] @@ -98,9 +126,9 @@ The RPM places config files, logs, and the data directory in the appropriate loc | Type | Description | Default Location | Setting | | --- | --- | --- | --- | -| home | Kibana home directory or `$KIBANA_HOME` | `/usr/share/kibana` | | -| bin | Binary scripts including `kibana` to start the Kibana server and `kibana-plugin` to install plugins | `/usr/share/kibana/bin` | | +| home | {{kib}} home directory or `$KIBANA_HOME` | `/usr/share/kibana` | | +| bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `/usr/share/kibana/bin` | | | config | Configuration files including `kibana.yml` | `/etc/kibana` | `[KBN_PATH_CONF](configure.md)` | -| data | The location of the data files written to disk by Kibana and its plugins | `/var/lib/kibana` | `path.data` | +| data | The location of the data files written to disk by {{kib}} and its plugins | `/var/lib/kibana` | `path.data` | | logs | Logs files location | `/var/log/kibana` | `[Logging configuration](../../monitor/logging-configuration/kibana-logging.md)` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. 
| `/usr/share/kibana/plugins` | | diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index b0ac787463..03c4c7e1aa 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -62,64 +62,47 @@ $$$install-stack-self-view-data$$$ * [/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md) * [/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md](/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md) - - If you want to install and manage {{es}} yourself, you can: -* Run {{es}} using a [Linux, MacOS, or Windows install package](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). -* Run {{es}} in a [Docker container](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). -* Set up and manage {{es}}, {{kib}}, {{agent}}, and the rest of the Elastic Stack on Kubernetes with [{{eck}}](https://www.elastic.co/guide/en/cloud-on-k8s/current). +* Run {{es}} using a [Linux, MacOS, or Windows install package](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). +* Run {{es}} in a [Docker container](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). ::::{tip} -To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see [Run Elasticsearch locally](../../../solutions/search/get-started.md). This setup is not suitable for production use. +To try out on your own machine, we recommend using Docker and running both {{es}} and Kibana. 
For more information, see [Run {{es}} locally](../../../solutions/search/get-started.md). This setup is not suitable for production use. :::: +## {{es}} install packages [elasticsearch-install-packages] - -## Elasticsearch install packages [elasticsearch-install-packages] - -Elasticsearch is provided in the following package formats: +{{es}} is provided in the following package formats: Linux and MacOS `tar.gz` archives : The `tar.gz` archives are available for installation on any Linux distribution and MacOS. - [Install {{es}} from archive on Linux or MacOS](../../../deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md) + [Install {{es}} from archive on Linux or MacOS](/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md) Windows `.zip` archive : The `zip` archive is suitable for installation on Windows. - [Install {{es}} with `.zip` on Windows](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md) + [Install {{es}} with `.zip` on Windows](/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md) `deb` -: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elasticsearch website or from our Debian repository. +: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages can be downloaded from the {{es}} website or from our Debian repository. - [Install Elasticsearch with Debian Package](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md) + [Install {{es}} with Debian Package](/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md) `rpm` -: The `rpm` package is suitable for installation on Red Hat, Centos, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elasticsearch website or from our RPM repository. 
- - [Install Elasticsearch with RPM](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md) - - -::::{tip} -For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md). -:::: - - - -## Elasticsearch container images [elasticsearch-docker-images] - -You can also run {{es}} inside a container image. +: The `rpm` package is suitable for installation on Red Hat, Centos, SLES, OpenSuSE and other RPM-based systems. RPM packages can be downloaded from the {{es}} website or from our RPM repository. -`docker` -: Docker container images may be downloaded from the Elastic Docker Registry. + [Install {{es}} with RPM](/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md) - [Install {{es}} with Docker](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md) +## {{es}} container images [elasticsearch-docker-images] +You can also run {{es}} inside a docket container image. Docker container images may be downloaded from the Elastic Docker Registry. 
+[Install {{es}} with Docker](/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md) ## Java (JVM) Version [jvm-version] diff --git a/deploy-manage/deploy/self-managed/system-config-tcpretries.md b/deploy-manage/deploy/self-managed/system-config-tcpretries.md index 1b94ddeb34..8bbf4609c4 100644 --- a/deploy-manage/deploy/self-managed/system-config-tcpretries.md +++ b/deploy-manage/deploy/self-managed/system-config-tcpretries.md @@ -1,7 +1,9 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config-tcpretries.html - +applies_to: + deployment: + self: --- # Decrease the TCP retransmission timeout [system-config-tcpretries] diff --git a/deploy-manage/toc.yml b/deploy-manage/toc.yml index 1fa538f020..de28479b23 100644 --- a/deploy-manage/toc.yml +++ b/deploy-manage/toc.yml @@ -347,21 +347,6 @@ toc: - file: deploy/self-managed/install-elasticsearch-with-docker.md - file: deploy/self-managed/local-development-installation-quickstart.md - file: deploy/self-managed/bootstrap-checks.md - children: - - file: deploy/self-managed/bootstrap-checks-heap-size.md - - file: deploy/self-managed/bootstrap-checks-file-descriptor.md - - file: deploy/self-managed/bootstrap-checks-memory-lock.md - - file: deploy/self-managed/max-number-threads-check.md - - file: deploy/self-managed/bootstrap-checks-max-file-size.md - - file: deploy/self-managed/max-size-virtual-memory-check.md - - file: deploy/self-managed/bootstrap-checks-max-map-count.md - - file: deploy/self-managed/bootstrap-checks-client-jvm.md - - file: deploy/self-managed/bootstrap-checks-serial-collector.md - - file: deploy/self-managed/bootstrap-checks-syscall-filter.md - - file: deploy/self-managed/bootstrap-checks-onerror.md - - file: deploy/self-managed/bootstrap-checks-early-access.md - - file: deploy/self-managed/bootstrap-checks-all-permission.md - - file: deploy/self-managed/bootstrap-checks-discovery-configuration.md - file: 
deploy/self-managed/configure-elasticsearch.md children: - file: deploy/self-managed/important-settings-configuration.md diff --git a/docset.yml b/docset.yml index f680c33475..d436b2180e 100644 --- a/docset.yml +++ b/docset.yml @@ -522,3 +522,4 @@ subs: icon-bug: "pass:[]" icon-checkInCircleFilled: "pass:[]" icon-warningFilled: "pass:[]" + stack-version: "9.0.0" diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks.md deleted file mode 100644 index 7a89c61d04..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks.md +++ /dev/null @@ -1,39 +0,0 @@ -# Bootstrap Checks [bootstrap-checks] - -Collectively, we have a lot of experience with users suffering unexpected issues because they have not configured [important settings](../../../deploy-manage/deploy/self-managed/important-settings-configuration.md). In previous versions of Elasticsearch, misconfiguration of some of these settings were logged as warnings. Understandably, users sometimes miss these log messages. To ensure that these settings receive the attention that they deserve, Elasticsearch has bootstrap checks upon startup. - -These bootstrap checks inspect a variety of Elasticsearch and system settings and compare them to values that are safe for the operation of Elasticsearch. If Elasticsearch is in development mode, any bootstrap checks that fail appear as warnings in the Elasticsearch log. If Elasticsearch is in production mode, any bootstrap checks that fail will cause Elasticsearch to refuse to start. - -There are some bootstrap checks that are always enforced to prevent Elasticsearch from running with incompatible settings. These checks are documented individually. - - -## Development vs. 
production mode [dev-vs-prod-mode] - -By default, {{es}} binds to loopback addresses for [HTTP and transport (internal) communication](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md). This is fine for downloading and playing with {{es}} as well as everyday development, but it’s useless for production systems. To join a cluster, an {{es}} node must be reachable via transport communication. To join a cluster via a non-loopback address, a node must bind transport to a non-loopback address and not be using [single-node discovery](../../../deploy-manage/deploy/self-managed/bootstrap-checks.md#single-node-discovery). Thus, we consider an Elasticsearch node to be in development mode if it can not form a cluster with another machine via a non-loopback address, and is otherwise in production mode if it can join a cluster via non-loopback addresses. - -Note that HTTP and transport can be configured independently via [`http.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings) and [`transport.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings); this can be useful for configuring a single node to be reachable via HTTP for testing purposes without triggering production mode. - - -## Single-node discovery [single-node-discovery] - -We recognize that some users need to bind the transport to an external interface for testing a remote-cluster configuration. For this situation, we provide the discovery type `single-node` (configure it by setting `discovery.type` to `single-node`); in this situation, a node will elect itself master and will not join a cluster with any other node. 
- - -## Forcing the bootstrap checks [_forcing_the_bootstrap_checks] - -If you are running a single node in production, it is possible to evade the bootstrap checks (either by not binding transport to an external interface, or by binding transport to an external interface and setting the discovery type to `single-node`). For this situation, you can force execution of the bootstrap checks by setting the system property `es.enforce.bootstrap.checks` to `true` in the [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options). We strongly encourage you to do this if you are in this specific situation. This system property can be used to force execution of the bootstrap checks independent of the node configuration. - - - - - - - - - - - - - - - diff --git a/raw-migrated-files/toc.yml b/raw-migrated-files/toc.yml index f3770a93cb..451303deb1 100644 --- a/raw-migrated-files/toc.yml +++ b/raw-migrated-files/toc.yml @@ -262,8 +262,6 @@ toc: - file: elasticsearch/elasticsearch-reference/autoscaling-machine-learning-decider.md - file: elasticsearch/elasticsearch-reference/autoscaling-proactive-storage-decider.md - file: elasticsearch/elasticsearch-reference/autoscaling-reactive-storage-decider.md - - file: elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md - - file: elasticsearch/elasticsearch-reference/bootstrap-checks.md - file: elasticsearch/elasticsearch-reference/change-passwords-native-users.md - file: elasticsearch/elasticsearch-reference/configuring-stack-security.md - file: elasticsearch/elasticsearch-reference/data-management.md @@ -274,7 +272,6 @@ toc: - file: elasticsearch/elasticsearch-reference/how-monitoring-works.md - file: elasticsearch/elasticsearch-reference/index-modules-allocation.md - file: elasticsearch/elasticsearch-reference/index-modules-mapper.md - - file: elasticsearch/elasticsearch-reference/install-elasticsearch.md - file: elasticsearch/elasticsearch-reference/ip-filtering.md - 
file: elasticsearch/elasticsearch-reference/monitor-elasticsearch-cluster.md - file: elasticsearch/elasticsearch-reference/monitoring-overview.md From d370ff1d8cd1c0838c837e3faa0be833442f1f0b Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Wed, 5 Mar 2025 10:49:05 -0500 Subject: [PATCH 04/43] more" --- deploy-manage/deploy/self-managed.md | 12 +++- .../_snippets/auto-security-config.md | 12 ++++ .../_snippets/check-es-running.md | 8 +-- .../{other-versions.md => es-releases.md} | 0 .../self-managed/_snippets/pw-env-var.md | 11 ++++ .../_snippets/start-security-enabled.md | 14 ---- .../deploy/self-managed/_snippets/systemd.md | 2 +- .../self-managed/_snippets/targz-start.md | 19 +----- .../_snippets/zip-windows-start.md | 16 ----- ...asticsearch-from-archive-on-linux-macos.md | 66 ++++++++++++------- ...stall-elasticsearch-with-debian-package.md | 9 ++- .../install-elasticsearch-with-rpm.md | 9 ++- ...stall-elasticsearch-with-zip-on-windows.md | 16 ++++- .../install-from-archive-on-linux-macos.md | 5 -- .../start-stop-elasticsearch.md | 35 ++-------- 15 files changed, 115 insertions(+), 119 deletions(-) create mode 100644 deploy-manage/deploy/self-managed/_snippets/auto-security-config.md rename deploy-manage/deploy/self-managed/_snippets/{other-versions.md => es-releases.md} (100%) create mode 100644 deploy-manage/deploy/self-managed/_snippets/pw-env-var.md delete mode 100644 deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 8f69e5ac21..5fe8d9bd7c 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -5,4 +5,14 @@ mapped_pages: # Self-managed cluster [dependencies-versions] -See [Elastic Stack Third-party Dependencices](https://artifacts.elastic.co/reports/dependencies/dependencies-current.md) for the complete list of dependencies for {{es}}. 
\ No newline at end of file +See [Elastic Stack Third-party Dependencies](https://artifacts.elastic.co/reports/dependencies/dependencies-current.md) for the complete list of dependencies for {{es}}. + + +```sh +{{stack-version}} +``` + +{{stack-version}} + +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch--linux-x86_64.tar.gz: OK`. +2. This directory is known as `$ES_HOME`. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/auto-security-config.md b/deploy-manage/deploy/self-managed/_snippets/auto-security-config.md new file mode 100644 index 0000000000..f44ca254af --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/auto-security-config.md @@ -0,0 +1,12 @@ +When you start {{es}} for the first time, the following security configuration occurs automatically: + +* [Certificates and keys](../../../deploy-manage/security/security-certificates-keys.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. +* The TLS configuration settings are written to `elasticsearch.yml`. +* A password is generated for the `elastic` user. +* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. + +You can then start {{kib}} and enter the enrollment token. This token automatically applies the security settings from your {{es}} cluster, authenticates to {{es}} with the built-in `kibana` service account, and writes the security configuration to `kibana.yml`. + +::::{note} +There are [some cases](../../../deploy-manage/security/security-certificates-keys.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. 
+:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/check-es-running.md b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md index 3a140511bf..8b49e3484b 100644 --- a/deploy-manage/deploy/self-managed/_snippets/check-es-running.md +++ b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md @@ -1,11 +1,11 @@ You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: ```sh -curl --cacert {{es-conf}}{{slash}}certs{{slash}}http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1> +curl --cacert {{es-conf}}{{slash}}certs{{slash}}http_ca.crt {{escape}} <1> +-u elastic:$ELASTIC_PASSWORD https://localhost:9200 <2> ``` - -1. Ensure that you use `https` in your call, or the request will fail.`--cacert` -: Path to the generated `http_ca.crt` certificate for the HTTP layer. +1. `--cacert`: Path to the generated `http_ca.crt` certificate for the HTTP layer. +2. Ensure that you use `https` in your call, or the request will fail. diff --git a/deploy-manage/deploy/self-managed/_snippets/other-versions.md b/deploy-manage/deploy/self-managed/_snippets/es-releases.md similarity index 100% rename from deploy-manage/deploy/self-managed/_snippets/other-versions.md rename to deploy-manage/deploy/self-managed/_snippets/es-releases.md diff --git a/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md b/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md new file mode 100644 index 0000000000..269ad49267 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md @@ -0,0 +1,11 @@ +The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. + +We recommend storing the `elastic` password as an environment variable in your shell. For example: + +```sh +{{export}}ELASTIC_PASSWORD="your_password" +``` + +If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. 
See [Secure settings](../../security/secure-settings.md) for more details. + +To learn how to reset this password, refer to [](/deploy-manage/users-roles/cluster-or-deployment-auth/built-in-sm.md). \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md b/deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md deleted file mode 100644 index 1ac869269e..0000000000 --- a/deploy-manage/deploy/self-managed/_snippets/start-security-enabled.md +++ /dev/null @@ -1,14 +0,0 @@ -When installing {es}, security features are enabled and configured by default. - -When you start {{es}} for the first time, the following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. - -The password and certificate and keys are output to your terminal. You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) command. - -We recommend storing the `elastic` password as an environment variable in your shell. 
For example: - -```sh -export ELASTIC_PASSWORD="your_password" -``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/systemd.md b/deploy-manage/deploy/self-managed/_snippets/systemd.md index 5de8d1943c..73a6f713cc 100644 --- a/deploy-manage/deploy/self-managed/_snippets/systemd.md +++ b/deploy-manage/deploy/self-managed/_snippets/systemd.md @@ -47,7 +47,7 @@ sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.md) for more command line options. -::::{admonition} Startup timeouts with older `systemd` versions +::::{admonition} Startup timeouts with older systemd versions :class: tip By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. diff --git a/deploy-manage/deploy/self-managed/_snippets/targz-start.md b/deploy-manage/deploy/self-managed/_snippets/targz-start.md index 7c50d49b49..b93f5a177c 100644 --- a/deploy-manage/deploy/self-managed/_snippets/targz-start.md +++ b/deploy-manage/deploy/self-managed/_snippets/targz-start.md @@ -3,24 +3,7 @@ Run the following command to start {{es}} from the command line: ```sh ./bin/elasticsearch ``` - -When starting {{es}} for the first time, security features are enabled and configured by default. The following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. -* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. 
- -The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. - -We recommend storing the `elastic` password as an environment variable in your shell. Example: - -```sh -export ELASTIC_PASSWORD="your_password" -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. - -By default {{es}} prints its logs to the console (`stdout`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. +By default, {{es}} prints its logs to the console (`stdout`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. To stop {{es}}, press `Ctrl-C`. diff --git a/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md index 76d6c23bc1..1314933873 100644 --- a/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md +++ b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md @@ -4,22 +4,6 @@ Run the following command to start {{es}} from the command line: .\bin\elasticsearch.bat ``` -When starting {{es}} for the first time, security features are enabled and configured by default. 
The following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. -* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. - -The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. - -We recommend storing the `elastic` password as an environment variable in your shell. Example: - -```sh -$ELASTIC_PASSWORD = "your_password" -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. - By default {{es}} prints its logs to the console (`STDOUT`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. To stop {{es}}, press `Ctrl-C`. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index da2f0c1c82..7c50aeed32 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -4,6 +4,8 @@ mapped_pages: sub: es-conf: "$ES_HOME/config" slash: "/" + export: "export" + escape: "\\" navigation_title: "Linux or MacOS" --- @@ -14,7 +16,7 @@ navigation_title: "Linux or MacOS" :::{include} _snippets/trial.md ::: -:::{include} _snippets/other-versions.md +:::{include} _snippets/es-releases.md ::: ::::{note} @@ -22,7 +24,11 @@ navigation_title: "Linux or MacOS" :::: -## Download and install archive for Linux [install-linux] +## Step 1: Download and install the archive + +Download and install the archive for Linux or MacOS. + +### Linux [install-linux] The Linux archive for {{es}} {{stack-version}} can be downloaded and installed as follows: @@ -34,12 +40,24 @@ tar -xzf elasticsearch-{{stack-version}}-linux-x86_64.tar.gz cd elasticsearch-{{stack-version}}/ <2> ``` -1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch-{{stack-version}}-linux-x86_64.tar.gz: OK`. +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch--linux-x86_64.tar.gz: OK`. 2. This directory is known as `$ES_HOME`. 
-## Download and install archive for MacOS [install-macos] +### MacOS [install-macos] + +The MacOS archive for {{es}} {{stack-version}} can be downloaded and installed as follows: + +```sh +curl -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz +curl https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> +tar -xzf elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz +cd elasticsearch-{{stack-version}}/ <2> +``` + +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch--darwin-x86_64.tar.gz: OK`. +2. This directory is known as `$ES_HOME`. ::::{admonition} macOS Gatekeeper warnings :class: important @@ -53,45 +71,47 @@ xattr -d -r com.apple.quarantine ``` Alternatively, you can add a security override by following the instructions in the *If you want to open an app that hasn’t been notarized or is from an unidentified developer* section of [Safely open apps on your Mac](https://support.apple.com/en-us/HT202491). - :::: +## Step 2: Enable automatic creation of system indices [targz-enable-indices] -The MacOS archive for {{es}} {{stack-version}} can be downloaded and installed as follows: +:::{include} _snippets/enable-auto-indices.md +::: -```sh -curl -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz -curl https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> -tar -xzf elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz -cd elasticsearch-{{stack-version}}/ <2> -``` +## Step 3: Start {{es}} [targz-running] -1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch-{{version}}-darwin-x86_64.tar.gz: OK`. -2. This directory is known as `$ES_HOME`. 
+You have several options for starting {{es}} -## Enable automatic creation of system indices [targz-enable-indices] +* [Run from the command line](#command-line) +* [Run the node to be enrolled in an existing cluster](#existing-cluster) +* [Run as a daemon](#setup-installation-daemon) -:::{include} _snippets/enable-auto-indices.md +### Run {{es}} from the command line [command-line] + +:::{include} _snippets/targz-start.md ::: -## Run {{es}} from the command line [targz-running] +#### Security at startup [security-at-startup] -:::{include} _snippets/targz-start.md +:::{include} _snippets/auto-security-config.md +::: + +:::{include} _snippets/pw-env-var.md ::: -### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] +### Enroll the node in an existing cluster [existing-cluster] :::{include} _snippets/enroll-nodes.md ::: -## Check that {{es}} is running [_check_that_elasticsearch_is_running] +### Run as a daemon [setup-installation-daemon] -:::{include} _snippets/check-es-running.md +:::{include} _snippets/targz-daemon.md ::: -## Run as a daemon [setup-installation-daemon] +## Step 4: Check that {{es}} is running [_check_that_elasticsearch_is_running] -:::{include} _snippets/targz-daemon.md +:::{include} _snippets/check-es-running.md ::: ## Configure {{es}} on the command line [targz-configuring] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index 87cbbd85f2..445e293600 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -5,6 +5,8 @@ sub: es-conf: "/etc/elasticsearch" slash: "/" distro: "Debian" + export: "export" + escape: "\\" navigation_title: Debian --- @@ -15,7 +17,7 @@ The Debian package for {{es}} can be [downloaded from our website](#install-deb) :::{include} _snippets/trial.md ::: 
-:::{include} _snippets/other-versions.md +:::{include} _snippets/es-releases.md ::: ::::{note} @@ -93,7 +95,10 @@ sudo dpkg -i elasticsearch-{{stack-version}}-amd64.deb ## Start {{es}} with security enabled [deb-security-configuration] -:::{include} _snippets/start-security-enabled.md +:::{include} _snippets/auto-security-config.md +::: + +:::{include} _snippets/pw-env-var.md ::: ### Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 20c4765934..79edb1d7bd 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -5,6 +5,8 @@ sub: es-conf: "/etc/elasticsearch" slash: "/" distro: "RPM" + export: "export" + escape: "\\" navigation_title: "RPM" --- @@ -19,7 +21,7 @@ RPM install is not supported on distributions with old versions of RPM, such as :::{include} _snippets/trial.md ::: -:::{include} _snippets/other-versions.md +:::{include} _snippets/es-releases.md ::: ::::{note} @@ -61,7 +63,10 @@ sudo rpm --install elasticsearch-{{stack-version}}-x86_64.rpm ## Start {{es}} with security enabled [rpm-security-configuration] -:::{include} _snippets/start-security-enabled.md +:::{include} _snippets/auto-security-config.md +::: + +:::{include} _snippets/pw-env-var.md ::: ### Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster_2] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 17a4dee345..472022ff87 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -4,6 +4,8 @@ mapped_pages: sub: 
es-conf: "%ES_HOME%\\config" slash: "\\" + export: "$" + escape: "^" navigation_title: Windows --- @@ -18,7 +20,7 @@ navigation_title: Windows On Windows the {{es}} {{ml}} feature requires the Microsoft Universal C Runtime library. This is built into Windows 10, Windows Server 2016 and more recent versions of Windows. For older versions of Windows it can be installed via Windows Update, or from a [separate download](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows). If you cannot install the Microsoft Universal C Runtime library you can still use the rest of {{es}} if you disable the {{ml}} feature. :::: -:::{include} _snippets/other-versions.md +:::{include} _snippets/es-releases.md ::: ::::{note} @@ -31,7 +33,7 @@ On Windows the {{es}} {{ml}} feature requires the Microsoft Universal C Runtime % link url manually set Download the `.zip` archive for {{es}} {{stack-version}} from: [https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-windows-x86_64.zip](https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-windows-x86_64.zip) -Unzip it with your favorite unzip tool. This will create a folder called `elasticsearch-{{stack-version}}`, which we will refer to as `%ES_HOME%`. In a terminal window, `cd` to the `%ES_HOME%` directory, for instance: +Unzip it with your favorite unzip tool. This will create a folder called `elasticsearch-`, which we will refer to as `%ES_HOME%`. 
In a terminal window, `cd` to the `%ES_HOME%` directory, for instance: ```sh cd C:\Program Files\elasticsearch-{{stack-version}} @@ -47,6 +49,14 @@ cd C:\Program Files\elasticsearch-{{stack-version}} :::{include} _snippets/zip-windows-start.md ::: +### Security at startup [security-at-startup] + +:::{include} _snippets/auto-security-config.md +::: + +:::{include} _snippets/pw-env-var.md +::: + ### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] :::{include} _snippets/enroll-nodes.md @@ -54,7 +64,7 @@ cd C:\Program Files\elasticsearch-{{stack-version}} ## Configure {{es}} on the command line [windows-configuring] -{{es}} loads its configuration from the `%ES_HOME%\config\elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). +{{es}} loads its configuration from the `%ES_HOME%\config\elasticsearch.yml` file by default. The format of this config file is explained in [](configure-elasticsearch.md). Any settings that can be specified in the config file can also be specified on the command line, using the `-E` syntax as follows: diff --git a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md index 72e43e9b41..26cfd34349 100644 --- a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md @@ -17,7 +17,6 @@ The latest stable version of {{kib}} can be found on the [Download Kibana](https ::::{note} macOS is supported for development purposes only and is not covered under the support SLA for [production-supported operating systems](https://www.elastic.co/support/matrix#kibana). 
- :::: @@ -66,7 +65,6 @@ cd kibana-{{stack-version}}/ <2> ## Start {{es}} and generate an enrollment token for {{kib}} [targz-enroll] - When you start {{es}} for the first time, the following security configuration occurs automatically: * [Certificates and keys](installing-elasticsearch.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. @@ -76,7 +74,6 @@ When you start {{es}} for the first time, the following security configuration o You can then start {{kib}} and enter the enrollment token to securely connect {{kib}} with {{es}}. The enrollment token is valid for 30 minutes. - ## Run {{kib}} from the command line [targz-running] {{kib}} can be started from the command line as follows: @@ -98,8 +95,6 @@ If you need to reset the password for the `elastic` user or other built-in users :::: - - ## Configure {{kib}} via the config file [targz-configuring] {{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md index 27cd822ad2..bc9dc901af 100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md @@ -21,35 +21,10 @@ If you installed {{es}} with a `.tar.gz` package, you can start {{es}} from the #### Run {{es}} from the command line [_run_es_from_the_command_line] -Run the following command to start {{es}} from the command line: - -```sh -./bin/elasticsearch -``` - -When starting {{es}} for the first time, security features are enabled and configured by default. The following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. 
-* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. -* An enrollment token is generated for {{kib}}, which is valid for 30 minutes. - -The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. +:::{include} /deploy-manage/deploy/self-managed/_snippets/targz-start.md +::: -We recommend storing the `elastic` password as an environment variable in your shell. Example: - -```sh -export ELASTIC_PASSWORD="your_password" -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../../deploy-manage/security/secure-settings.md) for more details. - -By default {{es}} prints its logs to the console (`stdout`) and to the `.log` file within the [logs directory](../../../deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. - -To stop {{es}}, press `Ctrl-C`. - -::::{note} -All scripts packaged with {{es}} require a version of Bash that supports arrays and assume that Bash is available at `/bin/bash`. As such, Bash should be available at this path either directly or via a symbolic link. -:::: +If you're starting {{es}} for the first time, then {{es}} also enables and configures security. [Learn more](/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md#security-at-startup). 
#### Run as a daemon [_run_as_a_daemon] @@ -158,7 +133,7 @@ sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.md) for more command line options. -::::{admonition} Startup timeouts with older `systemd` versions +::::{admonition} Startup timeouts with older systemd versions :class: tip By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. @@ -242,7 +217,7 @@ sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.md) for more command line options. -::::{admonition} Startup timeouts with older `systemd` versions +::::{admonition} Startup timeouts with older systemd versions :class: tip By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. 
From c79b7f5810a1a94fd4fde8a5540f8d6490bc3e52 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Wed, 5 Mar 2025 12:14:08 -0500 Subject: [PATCH 05/43] more subs --- deploy-manage/deploy/self-managed.md | 2 ++ .../install-elasticsearch-from-archive-on-linux-macos.md | 3 ++- .../self-managed/install-elasticsearch-with-debian-package.md | 3 ++- .../deploy/self-managed/install-elasticsearch-with-docker.md | 2 ++ .../deploy/self-managed/install-elasticsearch-with-rpm.md | 1 + .../self-managed/install-elasticsearch-with-zip-on-windows.md | 1 + .../deploy/self-managed/install-from-archive-on-linux-macos.md | 2 ++ docset.yml | 1 - 8 files changed, 12 insertions(+), 3 deletions(-) diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 5fe8d9bd7c..74e7b2553f 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -1,6 +1,8 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/dependencies-versions.html +sub: + stack-version: "9.0.0" --- # Self-managed cluster [dependencies-versions] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index 7c50aeed32..33293042b9 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -4,8 +4,9 @@ mapped_pages: sub: es-conf: "$ES_HOME/config" slash: "/" - export: "export" + export: "export " escape: "\\" + stack-version: "9.0.0" navigation_title: "Linux or MacOS" --- diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index 445e293600..e00791c5e6 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ 
b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -5,8 +5,9 @@ sub: es-conf: "/etc/elasticsearch" slash: "/" distro: "Debian" - export: "export" + export: "export " escape: "\\" + stack-version: "9.0.0" navigation_title: Debian --- diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 3a323e2aa7..0370b98082 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -2,6 +2,8 @@ mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html navigation_title: "Docker" +sub: + stack-version: "9.0.0" --- # Install {{es}} with Docker [docker] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 79edb1d7bd..aebdc6c6ff 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -7,6 +7,7 @@ sub: distro: "RPM" export: "export" escape: "\\" + stack-version: "9.0.0" navigation_title: "RPM" --- diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 472022ff87..8df9ca1bcf 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -6,6 +6,7 @@ sub: slash: "\\" export: "$" escape: "^" + stack-version: "9.0.0" navigation_title: Windows --- diff --git a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md index 26cfd34349..9d4f98098d 100644 --- 
a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md @@ -2,6 +2,8 @@ navigation_title: "Install from archive on Linux or macOS" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/targz.html +sub: + stack-version: "9.0.0" --- diff --git a/docset.yml b/docset.yml index d436b2180e..f680c33475 100644 --- a/docset.yml +++ b/docset.yml @@ -522,4 +522,3 @@ subs: icon-bug: "pass:[]" icon-checkInCircleFilled: "pass:[]" icon-warningFilled: "pass:[]" - stack-version: "9.0.0" From 9aaa01a38ffa774dc9b1b8cc5d00fa7a9cb73fd3 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Wed, 5 Mar 2025 17:16:15 -0500 Subject: [PATCH 06/43] so much stuff --- .../self-managed/_snippets/cmd-line-config.md | 21 + .../_snippets/etc-elasticsearch.md | 23 +- .../self-managed/_snippets/java-version.md | 3 + .../deploy/self-managed/_snippets/prereqs.md | 4 + .../self-managed/_snippets/security-files.md | 2 +- .../_snippets/skip-set-kernel-params.md | 2 +- .../self-managed/_snippets/start-local.md | 46 ++ .../self-managed/_snippets/systemd-journal.md | 23 + .../_snippets/systemd-startup-timeout.md | 26 + .../self-managed/_snippets/systemd-startup.md | 6 + .../deploy/self-managed/_snippets/systemd.md | 60 +- .../deploy/self-managed/_snippets/wolfi.md | 9 + .../important-system-configuration.md | 3 + .../install-elasticsearch-docker-basic.md | 184 ++++++ .../install-elasticsearch-docker-compose.md | 92 +++ .../install-elasticsearch-docker-configure.md | 149 +++++ .../install-elasticsearch-docker-prod.md | 199 ++++++ ...asticsearch-from-archive-on-linux-macos.md | 49 +- ...stall-elasticsearch-with-debian-package.md | 95 ++- .../install-elasticsearch-with-docker.md | 612 +----------------- .../install-elasticsearch-with-rpm.md | 88 ++- ...stall-elasticsearch-with-zip-on-windows.md | 192 +++--- .../self-managed/setting-system-settings.md | 2 +- .../start-stop-elasticsearch.md | 177 ++--- 
deploy-manage/toc.yml | 5 + solutions/search/run-elasticsearch-locally.md | 50 +- 26 files changed, 1091 insertions(+), 1031 deletions(-) create mode 100644 deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/java-version.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/prereqs.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/start-local.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/systemd-journal.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/systemd-startup-timeout.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/systemd-startup.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/wolfi.md create mode 100644 deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md create mode 100644 deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md create mode 100644 deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md create mode 100644 deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md diff --git a/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md b/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md new file mode 100644 index 0000000000..7fa3ebbb9b --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md @@ -0,0 +1,21 @@ +{{es}} loads its configuration from the following location by default: + +``` +{{es-conf}}{{slash}}elasticsearch.yml +``` + +The format of this config file is explained in [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). + +Any settings that can be specified in the config file can also be specified on the command line, using the `-E` syntax as follows: + +```sh +.\bin\elasticsearch.bat -Ecluster.name=my_cluster -Enode.name=node_1 +``` + +:::{note} +Values that contain spaces must be surrounded with quotes. 
For instance `-Epath.logs="C:\My Logs\logs"`. +::: + +:::{tip} +Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings such as `node.name` could be specified on the command line. +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md b/deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md index 44c6cc38d6..9aebf8b443 100644 --- a/deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/_snippets/etc-elasticsearch.md @@ -1,23 +1,18 @@ The `/etc/elasticsearch` directory contains the default runtime configuration for {{es}}. The ownership of this directory and all contained files are set to `root:elasticsearch` on package installations. -The `setgid` flag applies group permissions on the `/etc/elasticsearch` directory to ensure that {{es}} can read any contained files and subdirectories. All files and subdirectories inherit the `root:elasticsearch` ownership. Running commands from this directory or any subdirectories, such as the [elasticsearch-keystore tool](../../security/secure-settings.md), requires `root:elasticsearch` permissions. +The `setgid` flag applies group permissions on the `/etc/elasticsearch` directory to ensure that {{es}} can read any contained files and subdirectories. All files and subdirectories inherit the `root:elasticsearch` ownership. Running commands from this directory or any subdirectories, such as the [elasticsearch-keystore tool](/deploy-manage/security/secure-settings.md), requires `root:elasticsearch` permissions. -{{es}} loads its configuration from the `/etc/elasticsearch/elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). +{{es}} loads its configuration from the `/etc/elasticsearch/elasticsearch.yml` file by default.
The format of this config file is explained in [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). The {{distro}} package also has a system configuration file (`/etc/sysconfig/elasticsearch`), which allows you to set the following parameters: -`ES_JAVA_HOME` -: Set a custom Java path to be used. - -`ES_PATH_CONF` -: Configuration file directory (which needs to include `elasticsearch.yml`, `jvm.options`, and `log4j2.properties` files); defaults to `/etc/elasticsearch`. - -`ES_JAVA_OPTS` -: Any additional JVM system properties you may want to apply. - -`RESTART_ON_UPGRADE` -: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your {{es}} instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster. +| Parameter | Description | +| --- | --- | +| `ES_JAVA_HOME` | Set a custom Java path to be used. | +| `ES_PATH_CONF` | Configuration file directory (which needs to include `elasticsearch.yml`, `jvm.options`, and `log4j2.properties` files); defaults to `/etc/elasticsearch`. | +| `ES_JAVA_OPTS` | Any additional JVM system properties you may want to apply. | +| `RESTART_ON_UPGRADE` | Configure restart on package upgrade, defaults to `false`. This means you will have to restart your {{es}} instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster. | ::::{note} -Distributions that use `systemd` require that system resource limits be configured via `systemd` rather than via the `/etc/sysconfig/elasticsearch` file. See [Systemd configuration](setting-system-settings.md#systemd) for more information. 
+Distributions that use `systemd` require that system resource limits be configured via `systemd` rather than via the `/etc/sysconfig/elasticsearch` file. See [Systemd configuration](/deploy-manage/deploy/self-managed/setting-system-settings.md#systemd) for more information. :::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/java-version.md b/deploy-manage/deploy/self-managed/_snippets/java-version.md new file mode 100644 index 0000000000..782fa70c58 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/java-version.md @@ -0,0 +1,3 @@ +::::{note} +{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version). +:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/prereqs.md b/deploy-manage/deploy/self-managed/_snippets/prereqs.md new file mode 100644 index 0000000000..8a5188ef5b --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/prereqs.md @@ -0,0 +1,4 @@ +Before you install {{es}}, do the following: + +* Review the [supported operating systems](https://www.elastic.co/support/matrix). {{es}} is tested on the listed platforms, but it is possible that it will work on other platforms too. +* Configure your operating system using the [](/deploy-manage/deploy/self-managed/important-system-configuration.md) guidelines. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/security-files.md b/deploy-manage/deploy/self-managed/_snippets/security-files.md index 8aae4c831b..fda688ea6e 100644 --- a/deploy-manage/deploy/self-managed/_snippets/security-files.md +++ b/deploy-manage/deploy/self-managed/_snippets/security-files.md @@ -1,4 +1,4 @@ -When you install {{es}}, the following certificates and keys are generated in the {{es}} configuration directory, which are used to connect a {{kib}} instance to your secured {{es}} cluster and to encrypt internode communication. The files are listed here for reference. +When you install {{es}}, the following certificates and keys are generated in the {{es}} configuration directory. These files are used to connect a {{kib}} instance to your secured {{es}} cluster and to encrypt internode communication. The files are listed here for reference. `http_ca.crt` : The CA certificate that is used to sign the certificates for the HTTP layer of this {{es}} cluster. diff --git a/deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md b/deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md index db758386bd..6f5005db7e 100644 --- a/deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md +++ b/deploy-manage/deploy/self-managed/_snippets/skip-set-kernel-params.md @@ -1,3 +1,3 @@ ::::{note} -On systemd-based distributions, the installation scripts will attempt to set kernel parameters (e.g., `vm.max_map_count`); you can skip this by masking the systemd-sysctl.service unit. +On systemd-based distributions, the installation scripts will attempt to set kernel parameters (e.g., `vm.max_map_count`). You can skip this by masking the `systemd-sysctl.service` unit. 
:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/start-local.md b/deploy-manage/deploy/self-managed/_snippets/start-local.md new file mode 100644 index 0000000000..f980c71db6 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/start-local.md @@ -0,0 +1,46 @@ +::::{warning} +**DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS** + +The instructions on this page are for **local development only**. Do not use this configuration for production deployments, because it is not secure. Refer to [deployment options](../../get-started/deployment-options.md) for a list of production deployment options. + +:::: + +Quickly set up {{es}} and {{kib}} in Docker for local development or testing, using the [`start-local` script](https://github.com/elastic/start-local?tab=readme-ov-file#-try-elasticsearch-and-kibana-locally). + +This setup comes with a one-month trial license that includes all Elastic features. After the trial period, the license reverts to **Free and open - Basic**. Refer to [Elastic subscriptions](https://www.elastic.co/subscriptions) for more information. + +## Prerequisites [local-dev-prerequisites] + +* If you don’t have Docker installed, [download and install Docker Desktop](https://www.docker.com/products/docker-desktop) for your operating system. +* If you’re using Microsoft Windows, then install [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install). + +## Run `start-local` script [local-dev-quick-start] + +To set up {{es}} and {{kib}} locally, run the `start-local` script: + +```sh +curl -fsSL https://elastic.co/start-local | sh +``` + +This script creates an `elastic-start-local` folder containing configuration files and starts both {{es}} and {{kib}} using Docker. 
+ +After running the script, you can access Elastic services at the following endpoints: + +* **{{es}}**: [http://localhost:9200](http://localhost:9200) +* **{{kib}}**: [http://localhost:5601](http://localhost:5601) + +The script generates a random password for the `elastic` user, and an API key, stored in the `.env` file. + +::::{warning} +This setup is for local testing only. HTTPS is disabled, and Basic authentication is used for {{es}}. For security, {{es}} and {{kib}} are accessible only through `localhost`. + +:::: + +## Learn more [local-dev-additional-info] + +For more detailed information about the `start-local` setup, refer to the [README on GitHub](https://github.com/elastic/start-local). Learn about customizing the setup, logging, and more. + + +## Next steps [local-dev-next-steps] + +Use our [quick start guides](https://www.elastic.co/guide/en/elasticsearch/reference/current/quickstart.html) to learn the basics of {{es}}. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/systemd-journal.md b/deploy-manage/deploy/self-managed/_snippets/systemd-journal.md new file mode 100644 index 0000000000..118de5aba9 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/systemd-journal.md @@ -0,0 +1,23 @@ +By default, the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. 
+ +When `systemd` logging is enabled, the logging information is available using the `journalctl` commands: + +To tail the journal: + +```sh +sudo journalctl -f +``` + +To list journal entries for the elasticsearch service: + +```sh +sudo journalctl --unit elasticsearch +``` + +To list journal entries for the elasticsearch service starting from a given time: + +```sh +sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" +``` + +Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.html) for more command line options. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/systemd-startup-timeout.md b/deploy-manage/deploy/self-managed/_snippets/systemd-startup-timeout.md new file mode 100644 index 0000000000..9c58340ef7 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/systemd-startup-timeout.md @@ -0,0 +1,26 @@ +::::{admonition} Startup timeouts with older systemd versions +:class: tip + +By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd`, then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. + +Versions of `systemd` prior to 238 do not support the timeout extension mechanism and will terminate the {{es}} process if it has not fully started up within the configured timeout. If this happens, {{es}} will report in its logs that it was shut down normally a short time after it started: + +```text +[2022-01-31T01:22:31,077][INFO ][o.e.n.Node ] [instance-0000000123] starting ... +... +[2022-01-31T01:37:15,077][INFO ][o.e.n.Node ] [instance-0000000123] stopping ... +``` + +However the `systemd` logs will report that the startup timed out: + +```text +Jan 31 01:22:30 debian systemd[1]: Starting {{es}}...
+Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. +Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM +Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. +Jan 31 01:37:15 debian systemd[1]: Failed to start {{es}}. +``` + +To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. + +:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/systemd-startup.md b/deploy-manage/deploy/self-managed/_snippets/systemd-startup.md new file mode 100644 index 0000000000..c4164ab51b --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/systemd-startup.md @@ -0,0 +1,6 @@ +To configure {{es}} to start automatically when the system boots up, run the following commands: + +```sh +sudo /bin/systemctl daemon-reload +sudo /bin/systemctl enable elasticsearch.service +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/systemd.md b/deploy-manage/deploy/self-managed/_snippets/systemd.md index 73a6f713cc..aa18a1a88a 100644 --- a/deploy-manage/deploy/self-managed/_snippets/systemd.md +++ b/deploy-manage/deploy/self-managed/_snippets/systemd.md @@ -1,10 +1,3 @@ -To configure {{es}} to start automatically when the system boots up, run the following commands: - -```sh -sudo /bin/systemctl daemon-reload -sudo /bin/systemctl enable elasticsearch.service -``` - {{es}} can be started and stopped as follows: ```sh @@ -21,55 +14,4 @@ echo "keystore_password" > /path/to/my_pwd_file.tmp chmod 600 /path/to/my_pwd_file.tmp sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.tmp sudo systemctl start elasticsearch.service -``` - -By default the {{es}} service doesn’t log information in the `systemd` journal. 
To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. - -When `systemd` logging is enabled, the logging information are available using the `journalctl` commands: - -To tail the journal: - -```sh -sudo journalctl -f -``` - -To list journal entries for the elasticsearch service: - -```sh -sudo journalctl --unit elasticsearch -``` - -To list journal entries for the elasticsearch service starting from a given time: - -```sh -sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" -``` - -Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.md) for more command line options. - -::::{admonition} Startup timeouts with older systemd versions -:class: tip - -By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. - -Versions of `systemd` prior to 238 do not support the timeout extension mechanism and will terminate the {{es}} process if it has not fully started up within the configured timeout. If this happens, {{es}} will report in its logs that it was shut down normally a short time after it started: - -```text -[2022-01-31T01:22:31,077][INFO ][o.e.n.Node ] [instance-0000000123] starting ... -... -[2022-01-31T01:37:15,077][INFO ][o.e.n.Node ] [instance-0000000123] stopping ... -``` - -However the `systemd` logs will report that the startup timed out: - -```text -Jan 31 01:22:30 debian systemd[1]: Starting {{es}}... -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. 
-Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. -Jan 31 01:37:15 debian systemd[1]: Failed to start {{es}}. -``` - -To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. - -:::: \ No newline at end of file +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/wolfi.md b/deploy-manage/deploy/self-managed/_snippets/wolfi.md new file mode 100644 index 0000000000..5c84753980 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/wolfi.md @@ -0,0 +1,9 @@ +You can also use the hardened [Wolfi](https://wolfi.dev/) image for additional security. Using Wolfi images requires Docker version 20.10.10 or higher. + +To use the Wolfi image, append `-wolfi` to the image tag in the Docker command. + +For example: + +```sh +docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version}} +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/important-system-configuration.md b/deploy-manage/deploy/self-managed/important-system-configuration.md index 64486b8798..c027afdd98 100644 --- a/deploy-manage/deploy/self-managed/important-system-configuration.md +++ b/deploy-manage/deploy/self-managed/important-system-configuration.md @@ -21,6 +21,9 @@ The following settings **must** be considered before going to production: * [](executable-jna-tmpdir.md) (Linux only) * [](system-config-tcpretries.md) (Linux only) +:::{tip} +For examples of applying these settings in a Docker environment, refer to [](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md). +::: ## Development mode vs. 
production mode [dev-vs-prod] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md new file mode 100644 index 0000000000..5b1f8271d8 --- /dev/null +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md @@ -0,0 +1,184 @@ +--- +sub: + stack-version: "9.0.0" +applies_to: + deployment: + self: +navigation_title: Single-node cluster +--- + +# Start a single-node cluster in Docker [docker-cli-run-dev-mode] + +Use Docker commands to start a single-node {{es}} cluster for development or testing. You can then run additional Docker commands to add nodes to the test cluster or run {{kib}}. + +::::{tip} +* If you just want to test {{es}} in local development, refer to [Run {{es}} locally](../../../solutions/search/get-started.md). Please note that this setup is not suitable for production environments. +* This setup doesn’t run multiple {{es}} nodes or {{kib}} by default. To create a multi-node cluster with {{kib}}, use Docker Compose instead. See [Start a multi-node cluster with Docker Compose](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md). +:::: + + +## Hardened Docker images [docker-wolfi-hardened-image] + +:::{include} _snippets/wolfi.md +::: + +## Start a single-node cluster [_start_a_single_node_cluster] + +1. Install Docker. Visit [Get Docker](https://docs.docker.com/get-docker/) to install Docker for your environment. + + If using Docker Desktop, make sure to allocate at least 4GB of memory. You can adjust memory usage in Docker Desktop by going to **Settings > Resources**. + +2. Create a new docker network. + + ```sh + docker network create elastic + ``` + +3. Pull the {{es}} Docker image. + + ```sh + docker pull docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} + ``` + +4. Optional: Install [Cosign](https://docs.sigstore.dev/cosign/system_config/installation/) for your environment. 
Then use Cosign to verify the {{es}} image’s signature. + + $$$docker-verify-signature$$$ + + ```sh + wget https://artifacts.elastic.co/cosign.pub + cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} + ``` + + The `cosign` command prints the check results and the signature payload in JSON format: + + ```sh + Verification for docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -- + The following checks were performed on each of these signatures: + - The cosign claims were validated + - Existence of the claims in the transparency log was verified offline + - The signatures were verified against the specified public key + ``` + +5. Start an {{es}} container. + + ```sh + docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} + ``` + + ::::{tip} + Use the `-m` flag to set a memory limit for the container. This removes the need to [manually set the JVM size](#docker-set-heap-size). + :::: + + + {{ml-cap}} features such as [semantic search with ELSER](/solutions/search/semantic-search/semantic-search-elser-ingest-pipelines.md) require a larger container with more than 1GB of memory. If you intend to use the {{ml}} capabilities, then start the container with this command: + + ```sh + docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} + ``` + + The command prints the `elastic` user password and an enrollment token for {{kib}}. + +6. Copy the generated `elastic` password and enrollment token. These credentials are only shown when you start {{es}} for the first time. You can regenerate the credentials using the following commands. 
+ + ```sh + docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic + docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana + ``` + + We recommend storing the `elastic` password as an environment variable in your shell. Example: + + ```sh + export ELASTIC_PASSWORD="your_password" + ``` + +7. Copy the `http_ca.crt` SSL certificate from the container to your local machine. + + ```sh + docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . + ``` + +8. Make a REST API call to {{es}} to ensure the {{es}} container is running. + + ```sh + curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 + ``` + +## Add more nodes [_add_more_nodes] + +1. Use an existing node to generate an enrollment token for the new node. + + ```sh + docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node + ``` + + The enrollment token is valid for 30 minutes. + +2. Start a new {{es}} container. Include the enrollment token as an environment variable. + + ```sh + docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} + ``` + +3. Call the [cat nodes API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) to verify the node was added to the cluster. + + ```sh + curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cat/nodes + ``` + +## Run {{kib}} [run-kibana-docker] + +1. Pull the {{kib}} Docker image. + + ```sh + docker pull docker.elastic.co/kibana/kibana:{{stack-version}} + ``` + +2. Optional: Verify the {{kib}} image’s signature. + + ```sh + wget https://artifacts.elastic.co/cosign.pub + cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:{{stack-version}} + ``` + +3. Start a {{kib}} container.
+ + ```sh + docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{{stack-version}} + ``` + +4. When {{kib}} starts, it outputs a unique generated link to the terminal. To access {{kib}}, open this link in a web browser. +5. In your browser, enter the enrollment token that was generated when you started {{es}}. + + To regenerate the token, run: + + ```sh + docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana + ``` + +6. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. + + To regenerate the password, run: + + ```sh + docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic + ``` + +## Remove containers [remove-containers-docker] + +To remove the containers and their network, run: + +```sh +# Remove the Elastic network +docker network rm elastic + +# Remove {{es}} containers +docker rm es01 +docker rm es02 + +# Remove the {{kib}} container +docker rm kib01 +``` + +## Next steps [_next_steps_5] + +You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, review the [requirements and recommendations](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md) to apply when running {{es}} in Docker in production. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md new file mode 100644 index 0000000000..a78bc2dfee --- /dev/null +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md @@ -0,0 +1,92 @@ +--- +sub: + stack-version: "9.0.0" +applies_to: + deployment: + self: +navigation_title: Multi-node cluster +--- + +# Start a multi-node cluster with Docker Compose [docker-compose-file] + +Use Docker Compose to start a three-node {{es}} cluster with {{kib}}. 
Docker Compose lets you start multiple containers with a single command. + +## Hardened Docker images [docker-wolfi-hardened-image] + +:::{include} _snippets/wolfi.md +::: + +## Configure and start the cluster [_configure_and_start_the_cluster] + +1. Install Docker Compose. Visit the [Docker Compose docs](https://docs.docker.com/compose/install/) to install Docker Compose for your environment. + + If you’re using Docker Desktop, Docker Compose is installed automatically. Make sure to allocate at least 4GB of memory to Docker Desktop. You can adjust memory usage in Docker Desktop by going to **Settings > Resources**. + +2. Create or navigate to an empty directory for the project. +3. Download and save the following files in the project directory: + + * [`.env`](https://github.com/elastic/elasticsearch/blob/master/docs/reference/setup/install/docker/.env) + * [`docker-compose.yml`](https://github.com/elastic/elasticsearch/blob/master/docs/reference/setup/install/docker/docker-compose.yml) + +4. In the `.env` file, specify a password for the `ELASTIC_PASSWORD` and `KIBANA_PASSWORD` variables. + + The passwords must be alphanumeric and can’t contain special characters, such as `!` or `@`. The bash script included in the `docker-compose.yml` file only works with alphanumeric characters. Example: + + ```txt + # Password for the 'elastic' user (at least 6 characters) + ELASTIC_PASSWORD=changeme + + # Password for the 'kibana_system' user (at least 6 characters) + KIBANA_PASSWORD=changeme + ... + ``` + +5. In the `.env` file, set `STACK_VERSION` to the current {{stack}} version. + + ```txt + ... + # Version of Elastic products + STACK_VERSION={{stack-version}} + ... + ``` + +6. By default, the Docker Compose configuration exposes port `9200` on all network interfaces. + + To avoid exposing port `9200` to external hosts, set `ES_PORT` to `127.0.0.1:9200` in the `.env` file. This ensures {{es}} is only accessible from the host machine. + + ```txt + ... 
+ + # Port to expose {{es}} HTTP API to the host + #ES_PORT=9200 + ES_PORT=127.0.0.1:9200 + ... + ``` + +7. To start the cluster, run the following command from the project directory. + + ```sh + docker-compose up -d + ``` + +8. After the cluster has started, open [http://localhost:5601](http://localhost:5601) in a web browser to access {{kib}}. +9. Log in to {{kib}} as the `elastic` user using the `ELASTIC_PASSWORD` you set earlier. + + +## Stop and remove the cluster [_stop_and_remove_the_cluster] + +To stop the cluster, run `docker-compose down`. The data in the Docker volumes is preserved and loaded when you restart the cluster with `docker-compose up`. + +```sh +docker-compose down +``` + +To delete the network, containers, and volumes when you stop the cluster, specify the `-v` option: + +```sh +docker-compose down -v +``` + + +## Next steps [_next_steps_6] + +You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, review the [requirements and recommendations](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md) to apply when running {{es}} in Docker in production. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md new file mode 100644 index 0000000000..e2823664d5 --- /dev/null +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md @@ -0,0 +1,149 @@ +--- +sub: + stack-version: "9.0.0" +applies_to: + deployment: + self: +navigation_title: Configure +--- + +# Configure {{es}} with Docker [docker-configuration-methods] + +When you run in Docker, the [{{es}} configuration files](configure-elasticsearch.md#config-files-location) are loaded from `/usr/share/elasticsearch/config/`. + +To use custom configuration files, you [bind-mount the files](#docker-config-bind-mount) over the configuration files in the image.
+ +You can set individual {{es}} configuration parameters using Docker environment variables. The [sample compose file](#docker-compose-file) and the [single-node example](#docker-cli-run-dev-mode) use this method. You can use the setting name directly as the environment variable name. If you cannot do this, for example because your orchestration platform forbids periods in environment variable names, then you can use an alternative style by converting the setting name as follows. + +1. Change the setting name to uppercase +2. Prefix it with `ES_SETTING_` +3. Escape any underscores (`_`) by duplicating them +4. Convert all periods (`.`) to underscores (`_`) + +For example, `-e bootstrap.memory_lock=true` becomes `-e ES_SETTING_BOOTSTRAP_MEMORY__LOCK=true`. + +You can use the contents of a file to set the value of the `ELASTIC_PASSWORD` or `KEYSTORE_PASSWORD` environment variables, by suffixing the environment variable name with `_FILE`. This is useful for passing secrets such as passwords to {{es}} without specifying them directly. + +For example, to set the {{es}} bootstrap password from a file, you can bind mount the file and set the `ELASTIC_PASSWORD_FILE` environment variable to the mount location. If you mount the password file to `/run/secrets/bootstrapPassword.txt`, specify: + +```sh +-e ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt +``` + +You can override the default command for the image to pass {{es}} configuration parameters as command line options. For example: + +```sh +docker run bin/elasticsearch -Ecluster.name=mynewclustername +``` + +While bind-mounting your configuration files is usually the preferred method in production, you can also [create a custom Docker image](#_c_customized_image) that contains your configuration. + +### Mounting {{es}} configuration files [docker-config-bind-mount] + +Create custom config files and bind-mount them over the corresponding files in the Docker image. 
For example, to bind-mount `custom_elasticsearch.yml` with `docker run`, specify: + +```sh +-v full_path_to/custom_elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml +``` + +If you bind-mount a custom `elasticsearch.yml` file, ensure it includes the `network.host: 0.0.0.0` setting. This setting ensures the node is reachable for HTTP and transport traffic, provided its ports are exposed. The Docker image’s built-in `elasticsearch.yml` file includes this setting by default. + +::::{important} +The container **runs {{es}} as user `elasticsearch` using uid:gid `1000:0`**. Bind mounted host directories and files must be accessible by this user, and the data and log directories must be writable by this user. +:::: + + + +### Create an encrypted {{es}} keystore [docker-keystore-bind-mount] + +By default, {{es}} will auto-generate a keystore file for [secure settings](../../security/secure-settings.md). This file is obfuscated but not encrypted. + +To encrypt your secure settings with a password and have them persist outside the container, use a `docker run` command to manually create the keystore instead. The command must: + +* Bind-mount the `config` directory. The command will create an `elasticsearch.keystore` file in this directory. To avoid errors, do not directly bind-mount the `elasticsearch.keystore` file. +* Use the `elasticsearch-keystore` tool with the `create -p` option. You’ll be prompted to enter a password for the keystore. + +For example: + +```sh +docker run -it --rm \ +-v full_path_to/config:/usr/share/elasticsearch/config \ +docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ +bin/elasticsearch-keystore create -p +``` + +You can also use a `docker run` command to add or update secure settings in the keystore. You’ll be prompted to enter the setting values. If the keystore is encrypted, you’ll also be prompted to enter the keystore password. 
+ +```sh +docker run -it --rm \ +-v full_path_to/config:/usr/share/elasticsearch/config \ +docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ +bin/elasticsearch-keystore \ +add my.secure.setting \ +my.other.secure.setting +``` + +If you’ve already created the keystore and don’t need to update it, you can bind-mount the `elasticsearch.keystore` file directly. You can use the `KEYSTORE_PASSWORD` environment variable to provide the keystore password to the container at startup. For example, a `docker run` command might have the following options: + +```sh +-v full_path_to/config/elasticsearch.keystore:/usr/share/elasticsearch/config/elasticsearch.keystore +-e KEYSTORE_PASSWORD=mypassword +``` + + +### Using custom Docker images [_c_customized_image] + +In some environments, it might make more sense to prepare a custom image that contains your configuration. A `Dockerfile` to achieve this might be as simple as: + +```sh +FROM docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} +COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/ +``` + +You could then build and run the image with: + +```sh +docker build --tag=elasticsearch-custom . +docker run -ti -v /usr/share/elasticsearch/data elasticsearch-custom +``` + +Some plugins require additional security permissions. You must explicitly accept them either by: + +* Attaching a `tty` when you run the Docker image and allowing the permissions when prompted. +* Inspecting the security permissions and accepting them (if appropriate) by adding the `--batch` flag to the plugin install command. + +See [Plugin management](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/_other_command_line_parameters.md) for more information. + + +### Troubleshoot Docker errors for {{es}} [troubleshoot-docker-errors] + +Here’s how to resolve common errors when running {{es}} with Docker. 
+ + +### elasticsearch.keystore is a directory [_elasticsearch_keystore_is_a_directory] + +```txt +Exception in thread "main" org.elasticsearch.bootstrap.BootstrapException: java.io.IOException: Is a directory: SimpleFSIndexInput(path="/usr/share/elasticsearch/config/elasticsearch.keystore") Likely root cause: java.io.IOException: Is a directory +``` + +A [keystore-related](#docker-keystore-bind-mount) `docker run` command attempted to directly bind-mount an `elasticsearch.keystore` file that doesn’t exist. If you use the `-v` or `--volume` flag to mount a file that doesn’t exist, Docker instead creates a directory with the same name. + +To resolve this error: + +1. Delete the `elasticsearch.keystore` directory in the `config` directory. +2. Update the `-v` or `--volume` flag to point to the `config` directory path rather than the keystore file’s path. For an example, see [Create an encrypted {{es}} keystore](#docker-keystore-bind-mount). +3. Retry the command. + + +### elasticsearch.keystore: Device or resource busy [_elasticsearch_keystore_device_or_resource_busy] + +```txt +Exception in thread "main" java.nio.file.FileSystemException: /usr/share/elasticsearch/config/elasticsearch.keystore.tmp -> /usr/share/elasticsearch/config/elasticsearch.keystore: Device or resource busy +``` + +A `docker run` command attempted to [update the keystore](#docker-keystore-bind-mount) while directly bind-mounting the `elasticsearch.keystore` file. To update the keystore, the container requires access to other files in the `config` directory, such as `keystore.tmp`. + +To resolve this error: + +1. Update the `-v` or `--volume` flag to point to the `config` directory path rather than the keystore file’s path. For an example, see [Create an encrypted {{es}} keystore](#docker-keystore-bind-mount). +2. Retry the command. 
diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md new file mode 100644 index 0000000000..c66513eeb0 --- /dev/null +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md @@ -0,0 +1,197 @@ +--- +sub: + stack-version: "9.0.0" +applies_to: + deployment: + self: +navigation_title: Production settings +--- + +# Using the Docker images in production [docker-prod-prerequisites] + +The following requirements and recommendations apply when running {{es}} in Docker in production, including some guidelines outlined in [](/deploy-manage/deploy/self-managed/important-system-configuration.md). + +## Set `vm.max_map_count` to at least `262144` [_set_vm_max_map_count_to_at_least_262144] + +The `vm.max_map_count` kernel setting must be set to at least `262144` for production use. + +How you set `vm.max_map_count` depends on your platform. + +:::{dropdown} Linux + +To view the current value for the `vm.max_map_count` setting, run: + +```sh +grep vm.max_map_count /etc/sysctl.conf +vm.max_map_count=262144 +``` + +To apply the setting on a live system, run: + +```sh +sysctl -w vm.max_map_count=262144 +``` + +To permanently change the value for the `vm.max_map_count` setting, update the value in `/etc/sysctl.conf`. +::: + +:::{dropdown} macOS with Docker for Mac + +The `vm.max_map_count` setting must be set within the xhyve virtual machine: + +1. From the command line, run: + + ```sh + screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty + ``` + +2. Press enter and use `sysctl` to configure `vm.max_map_count`: + + ```sh + sysctl -w vm.max_map_count=262144 + ``` + +3. To exit the `screen` session, type `Ctrl a d`. 
+::: + +:::{dropdown} Windows and macOS with Docker Desktop + +The `vm.max_map_count` setting must be set via docker-machine: + +```sh +docker-machine ssh +sudo sysctl -w vm.max_map_count=262144 +``` +::: + +:::{dropdown} Windows with Docker Desktop WSL 2 backend + +The `vm.max_map_count` setting must be set in the "docker-desktop" WSL instance before the {{es}} container will properly start. There are several ways to do this, depending on your version of Windows and your version of WSL. + +If you are on Windows 10 before version 22H2, or if you are on Windows 10 version 22H2 using the built-in version of WSL, you must either manually set it every time you restart Docker before starting your {{es}} container, or (if you do not wish to do so on every restart) you must globally set every WSL2 instance to have the `vm.max_map_count` changed. This is because these versions of WSL do not properly process the /etc/sysctl.conf file. + +To manually set it every time you reboot, you must run the following commands in a command prompt or PowerShell window every time you restart Docker: + +```sh +wsl -d docker-desktop -u root +sysctl -w vm.max_map_count=262144 +``` + +If you are on these versions of WSL and you do not want to have to run those commands every time you restart Docker, you can globally change every WSL distribution with this setting by modifying your %USERPROFILE%\.wslconfig as follows: + +```text +[wsl2] +kernelCommandLine = "sysctl.vm.max_map_count=262144" +``` + +This will cause all WSL2 VMs to have that setting assigned when they start. 
+ +If you are on Windows 11, or Windows 10 version 22H2 and have installed the Microsoft Store version of WSL, you can modify the /etc/sysctl.conf within the "docker-desktop" WSL distribution, perhaps with commands like this: + +```sh +wsl -d docker-desktop -u root +vi /etc/sysctl.conf +``` + +and appending a line which reads: + +```text +vm.max_map_count = 262144 +``` +::: + + +## Configuration files must be readable by the `elasticsearch` user [_configuration_files_must_be_readable_by_the_elasticsearch_user] + +By default, {{es}} runs inside the container as user `elasticsearch` using uid:gid `1000:0`. + +If you are bind-mounting a local directory or file, it must be readable by the `elasticsearch` user. In addition, this user must have write access to the [config, data and log dirs](important-settings-configuration.md#path-settings) ({{es}} needs write access to the `config` directory so that it can generate a keystore). A good strategy is to grant group access to gid `0` for the local directory. + +::::{important} +One exception is [Openshift](https://docs.openshift.com/container-platform/3.6/creating_images/guidelines.md#openshift-specific-guidelines), which runs containers using an arbitrarily assigned user ID. Openshift presents persistent volumes with the gid set to `0`, which works without any adjustments. +:::: + +For example, to prepare a local directory for storing data through a bind-mount: + +```sh +mkdir esdatadir +chmod g+rwx esdatadir +chgrp 0 esdatadir +``` + +You can also run an {{es}} container using both a custom UID and GID. You must ensure that file permissions will not prevent {{es}} from executing. You can use one of two options: + +* Bind-mount the `config`, `data` and `logs` directories. If you intend to install plugins and prefer not to [create a custom Docker image](#_c_customized_image), you must also bind-mount the `plugins` directory. +* Pass the `--group-add 0` command line option to `docker run`. 
This ensures that the user under which {{es}} is running is also a member of the `root` (GID 0) group inside the container. + + +## Increase ulimits for nofile and nproc [_increase_ulimits_for_nofile_and_nproc] + +Increased ulimits for [nofile](setting-system-settings.md) and [nproc](max-number-of-threads.md) must be available for the {{es}} containers. Verify the [init system](https://github.com/moby/moby/tree/ea4d1243953e6b652082305a9c3cda8656edab26/contrib/init) for the Docker daemon sets them to acceptable values. + +To check the Docker daemon defaults for ulimits, run: + +```sh +docker run --rm docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' +``` + +If needed, adjust them in the Daemon or override them per container. For example, when using `docker run`, set: + +```sh +--ulimit nofile=65535:65535 +``` + + +## Disable swapping [_disable_swapping] + +Swapping needs to be disabled for performance and node stability. For information about ways to do this, see [Disable swapping](setup-configuration-memory.md). + +If you opt for the `bootstrap.memory_lock: true` approach, you also need to define the `memlock: true` ulimit in the [Docker Daemon](https://docs.docker.com/engine/reference/commandline/dockerd/#default-ulimits), or explicitly set for the container as shown in the [sample compose file](#docker-compose-file). When using `docker run`, you can specify: + +```sh +-e "bootstrap.memory_lock=true" --ulimit memlock=-1:-1 +``` + + +## Randomize published ports [_randomize_published_ports] + +The image [exposes](https://docs.docker.com/engine/reference/builder/#/expose) TCP ports 9200 and 9300. For production clusters, randomizing the published ports with `--publish-all` is recommended, unless you are pinning one container per host. 
+ + +## Manually set the heap size [docker-set-heap-size] + +By default, {{es}} automatically sizes JVM heap based on a nodes’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and the total memory available to the node’s container. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting JVM heap size. + +To manually set the heap size in production, bind mount a [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options) file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired [heap size](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-heap-size) settings. + +For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` environment variable. For example, to use 1GB, use the following command. + +```sh +docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} +``` + +The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in production. + + +## Pin deployments to a specific image version [_pin_deployments_to_a_specific_image_version] + +Pin your deployments to a specific version of the {{es}} Docker image. For example `docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}}`. + + +## Always bind data volumes [_always_bind_data_volumes] + +You should use a volume bound on `/usr/share/elasticsearch/data` for the following reasons: + +1. The data of your {{es}} node won’t be lost if the container is killed +2. {{es}} is I/O sensitive and the Docker storage driver is not ideal for fast I/O +3. 
It allows the use of advanced [Docker volume plugins](https://docs.docker.com/engine/extend/plugins/#volume-plugins) + + +## Avoid using `loop-lvm` mode [_avoid_using_loop_lvm_mode] + +If you are using the devicemapper storage driver, do not use the default `loop-lvm` mode. Configure docker-engine to use [direct-lvm](https://docs.docker.com/engine/userguide/storagedriver/device-mapper-driver/#configure-docker-with-devicemapper). + + +## Centralize your logs [_centralize_your_logs] + +Consider centralizing your logs by using a different [logging driver](https://docs.docker.com/engine/admin/logging/overview/). Also note that the default json-file logging driver is not ideally suited for production use. diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index 33293042b9..d0d1fdf2e6 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -8,6 +8,9 @@ sub: escape: "\\" stack-version: "9.0.0" navigation_title: "Linux or MacOS" +applies_to: + deployment: + self: --- # Install {{es}} from archive on Linux or MacOS [targz] @@ -20,10 +23,13 @@ navigation_title: "Linux or MacOS" :::{include} _snippets/es-releases.md ::: -::::{note} -{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) -:::: +:::{include} _snippets/java-version.md +::: +## Before you start + +:::{include} _snippets/prereqs.md +::: ## Step 1: Download and install the archive @@ -81,7 +87,7 @@ Alternatively, you can add a security override by following the instructions in ## Step 3: Start {{es}} [targz-running] -You have several options for starting {{es}} +You have several options for starting {{es}}: * [Run from the command line](#command-line) * [Run the node to be enrolled in an existing cluster](#existing-cluster) @@ -100,6 +106,11 @@ You have several options for starting {{es}} :::{include} _snippets/pw-env-var.md ::: +#### Configure {{es}} on the command line [targz-configuring] + +:::{include} _snippets/cmd-line-config.md +::: + ### Enroll the node in an existing cluster [existing-cluster] :::{include} _snippets/enroll-nodes.md @@ -110,36 +121,22 @@ You have several options for starting {{es}} :::{include} _snippets/targz-daemon.md ::: -## Step 4: Check that {{es}} is running [_check_that_elasticsearch_is_running] +## Step 4: Check that {{es}} is running [check_that_elasticsearch_is_running] :::{include} _snippets/check-es-running.md ::: -## Configure {{es}} on the command line [targz-configuring] - -{{es}} loads its configuration from the `$ES_HOME/config/elasticsearch.yml` file by default. The format of this config file is explained in [*Configuring {{es}}*](configure-elasticsearch.md). - -Any settings that can be specified in the config file can also be specified on the command line, using the `-E` syntax as follows: - -```sh -./bin/elasticsearch -d -Ecluster.name=my_cluster -Enode.name=node_1 -``` - -::::{tip} -Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings such as `node.name` could be specified on the command line. 
-:::: - -## Connect clients to {{es}} [_connect_clients_to_es] +## Connect clients to {{es}} [connect_clients_to_es] :::{include} _snippets/connect-clients.md ::: -### Use the CA fingerprint [_use_the_ca_fingerprint_2] +### Use the CA fingerprint [use_the_ca_fingerprint] :::{include} _snippets/ca-fingerprint.md ::: -### Use the CA certificate [_use_the_ca_certificate_2] +### Use the CA certificate [use_the_ca_certificate] :::{include} _snippets/ca-cert.md ::: @@ -148,9 +145,9 @@ Typically, any cluster-wide settings (like `cluster.name`) should be added to th The archive distributions are entirely self-contained. All files and directories are, by default, contained within `$ES_HOME` — the directory created when unpacking the archive. -This is very convenient because you don’t have to create any directories to start using {{es}}, and uninstalling {{es}} is as easy as removing the `$ES_HOME` directory. However, it is advisable to change the default locations of the config directory, the data directory, and the logs directory so that you do not delete important data later on. +This is convenient because you don’t have to create any directories to start using {{es}}, and uninstalling {{es}} is as easy as removing the `$ES_HOME` directory. However, you should change the default locations of the config directory, the data directory, and the logs directory so that you do not delete important data later on. -| Type | Description | Default Location | Setting | +| Type | Description | Default location | Setting | | --- | --- | --- | --- | | home | {{es}} home directory or `$ES_HOME` | Directory created by unpacking the archive | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `$ES_HOME/bin` | | @@ -161,12 +158,12 @@ This is very convenient because you don’t have to create any directories to st | plugins | Plugin files location. Each plugin will be contained in a subdirectory. 
| `$ES_HOME/plugins` | | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | -### Security certificates and keys [_security_certificates_and_keys] +### Security certificates and keys [security_certificates_and_keys] :::{include} _snippets/security-files.md ::: -## Next steps [_next_steps] +## Next steps [next_steps] :::{include} _snippets/install-next-steps.md ::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index e00791c5e6..8628bb7fca 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -9,6 +9,9 @@ sub: escape: "\\" stack-version: "9.0.0" navigation_title: Debian +applies_to: + deployment: + self: --- # Install {{es}} with a Debian package [deb] @@ -21,12 +24,16 @@ The Debian package for {{es}} can be [downloaded from our website](#install-deb) :::{include} _snippets/es-releases.md ::: -::::{note} -{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) -:::: +:::{include} _snippets/java-version.md +::: + +## Before you start +:::{include} _snippets/prereqs.md +::: -## Import the {{es}} PGP key [deb-key] + +## Step 1: Import the {{es}} PGP key [deb-key] :::{include} _snippets/pgp-key.md ::: @@ -35,19 +42,32 @@ The Debian package for {{es}} can be [downloaded from our website](#install-deb) wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg ``` -## Installing from the APT repository [deb-repo] +## Step 2: Install {{es}} -You may need to install the `apt-transport-https` package on Debian before proceeding: +You have several options for installing the {{es}} Debian package: -```sh -sudo apt-get install apt-transport-https -``` +* [From the APT repository](#deb-repo) +* [Manually](#install-deb) + +### Install from the APT repository [deb-repo] -Save the repository definition to `/etc/apt/sources.list.d/elastic-9.x.list`: +1. You may need to install the `apt-transport-https` package on Debian before proceeding: -```sh -echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/8.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-8.x.list -``` + ```sh + sudo apt-get install apt-transport-https + ``` + +2. Save the repository definition to `/etc/apt/sources.list.d/elastic-9.x.list`: + + ```sh + echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/9.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-9.x.list + ``` + +3. Install the {{es}} Debian package: + + ```sh + sudo apt-get update && sudo apt-get install elasticsearch + ``` :::{note} These instructions do not use `add-apt-repository` for several reasons: @@ -56,23 +76,17 @@ These instructions do not use `add-apt-repository` for several reasons: 2. 
`add-apt-repository` is not part of the default install on many distributions and requires a number of non-default dependencies. 3. Older versions of `add-apt-repository` always add a `deb-src` entry which will cause errors because we do not provide a source package. If you have added the `deb-src` entry, you will see an error like the following until you delete the `deb-src` line: - ``` + ```text Unable to find expected entry 'main/source/Sources' in Release file (Wrong sources.list entry or malformed file) ``` ::: -You can install the {{es}} Debian package with: - -```sh -sudo apt-get update && sudo apt-get install elasticsearch -``` - :::{warning} If two entries exist for the same {{es}} repository, you will see an error like this during `apt-get update`: -``` -Duplicate sources.list entry https://artifacts.elastic.co/packages/8.x/apt/ ... +```text +Duplicate sources.list entry https://artifacts.elastic.co/packages/9.x/apt/ ... ``` Examine `/etc/apt/sources.list.d/elasticsearch-9.x.list` for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. @@ -81,7 +95,7 @@ Examine `/etc/apt/sources.list.d/elasticsearch-9.x.list` for the duplicate entry :::{include} _snippets/skip-set-kernel-params.md ::: -## Download and install the Debian package manually [install-deb] +### Download and install the Debian package manually [install-deb] The Debian package for {{es}} {{stack-version}} can be downloaded from the website and installed as follows: @@ -92,32 +106,45 @@ shasum -a 512 -c elasticsearch-{{stack-version}}-amd64.deb.sha512 <1> sudo dpkg -i elasticsearch-{{stack-version}}-amd64.deb ``` -1. Compares the SHA of the downloaded Debian package and the published checksum, which should output `elasticsearch-{{version}}-amd64.deb: OK`. +1. Compares the SHA of the downloaded Debian package and the published checksum, which should output `elasticsearch-{{stack-version}}-amd64.deb: OK`. 
-## Start {{es}} with security enabled [deb-security-configuration] +## Step 3 (Optional): Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster] -:::{include} _snippets/auto-security-config.md +:::{include} _snippets/join-existing-cluster.md ::: -:::{include} _snippets/pw-env-var.md +## Step 4: Enable automatic creation of system indices [deb-enable-indices] + +:::{include} _snippets/enable-auto-indices.md ::: -### Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster] +## Step 5: Run {{es}} with `systemd` [running-systemd] -:::{include} _snippets/join-existing-cluster.md +:::{include} _snippets/systemd.md ::: -## Enable automatic creation of system indices [deb-enable-indices] +### Start {{es}} automatically -:::{include} _snippets/enable-auto-indices.md +:::{include} _snippets/systemd-startup.md ::: -## Running {{es}} with `systemd` [running-systemd] +### Log to the systemd journal -:::{include} _snippets/systemd.md +:::{include} _snippets/systemd-journal.md +::: + +:::{include} _snippets/systemd-startup-timeout.md +::: + +### Security at startup [deb-security-configuration] + +:::{include} _snippets/auto-security-config.md +::: + +:::{include} _snippets/pw-env-var.md ::: -## Check that {{es}} is running [deb-check-running] +## Step 6: Check that {{es}} is running [deb-check-running] :::{include} _snippets/check-es-running.md ::: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 0370b98082..5bc1252a62 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -4,6 +4,9 @@ mapped_pages: navigation_title: "Docker" sub: stack-version: "9.0.0" +applies_to: + deployment: + self: --- # Install {{es}} with Docker [docker] @@ -15,610 +18,11 @@ Docker images for {{es}} are 
available from the Elastic Docker registry. A list ::::{tip} If you just want to test {{es}} in local development, refer to [Run {{es}} locally](../../../solutions/search/get-started.md). Please note that this setup is not suitable for production environments. - -:::: - - -## Run {{es}} in Docker [docker-cli-run-dev-mode] - -Use Docker commands to start a single-node {{es}} cluster for development or testing. You can then run additional Docker commands to add nodes to the test cluster or run {{kib}}. - -::::{tip} -This setup doesn’t run multiple {{es}} nodes or {{kib}} by default. To create a multi-node cluster with {{kib}}, use Docker Compose instead. See [Start a multi-node cluster with Docker Compose](#docker-compose-file). :::: +Review the following guides to install {{es}} with Docker: -### Hardened Docker images [docker-wolfi-hardened-image] - -You can also use the hardened [Wolfi](https://wolfi.dev/) image for additional security. Using Wolfi images requires Docker version 20.10.10 or higher. - -To use the Wolfi image, append `-wolfi` to the image tag in the Docker command. - -For example: - -```sh -docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version}} -``` - - -### Start a single-node cluster [_start_a_single_node_cluster] - -1. Install Docker. Visit [Get Docker](https://docs.docker.com/get-docker/) to install Docker for your environment. - - If using Docker Desktop, make sure to allocate at least 4GB of memory. You can adjust memory usage in Docker Desktop by going to **Settings > Resources**. - -2. Create a new docker network. - - ```sh - docker network create elastic - ``` - -3. Pull the {{es}} Docker image. - - ```sh - docker pull docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} - ``` - -4. Optional: Install [Cosign](https://docs.sigstore.dev/cosign/system_config/installation/) for your environment. Then use Cosign to verify the {{es}} image’s signature. 
- - $$$docker-verify-signature$$$ - - ```sh - wget https://artifacts.elastic.co/cosign.pub - cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} - ``` - - The `cosign` command prints the check results and the signature payload in JSON format: - - ```sh - Verification for docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -- - The following checks were performed on each of these signatures: - - The cosign claims were validated - - Existence of the claims in the transparency log was verified offline - - The signatures were verified against the specified public key - ``` - -5. Start an {{es}} container. - - ```sh - docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} - ``` - - ::::{tip} - Use the `-m` flag to set a memory limit for the container. This removes the need to [manually set the JVM size](#docker-set-heap-size). - :::: - - - {{ml-cap}} features such as [semantic search with ELSER](/solutions/search/semantic-search/semantic-search-elser-ingest-pipelines.md) require a larger container with more than 1GB of memory. If you intend to use the {{ml}} capabilities, then start the container with this command: - - ```sh - docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} - ``` - - The command prints the `elastic` user password and an enrollment token for {{kib}}. - -6. Copy the generated `elastic` password and enrollment token. These credentials are only shown when you start {{es}} for the first time. You can regenerate the credentials using the following commands. 
- - ```sh - docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic - docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana - ``` - - We recommend storing the `elastic` password as an environment variable in your shell. Example: - - ```sh - export ELASTIC_PASSWORD="your_password" - ``` - -7. Copy the `http_ca.crt` SSL certificate from the container to your local machine. - - ```sh - docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . - ``` - -8. Make a REST API call to {{es}} to ensure the {{es}} container is running. - - ```sh - curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 - ``` - - - -### Add more nodes [_add_more_nodes] - -1. Use an existing node to generate a enrollment token for the new node. - - ```sh - docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node - ``` - - The enrollment token is valid for 30 minutes. - -2. Start a new {{es}} container. Include the enrollment token as an environment variable. - - ```sh - docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} - ``` - -3. Call the [cat nodes API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) to verify the node was added to the cluster. - - ```sh - curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cat/nodes - ``` - -### Run {{kib}} [run-kibana-docker] - -1. Pull the {{kib}} Docker image. - - ```sh - docker pull docker.elastic.co/kibana/kibana:{{stack-version}} - ``` - -2. Optional: Verify the {{kib}} image’s signature. - - ```sh - wget https://artifacts.elastic.co/cosign.pub - cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:{{stack-version}} - ``` - -3. Start a {{kib}} container. 
- - ```sh - docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{{stack-version}} - ``` - -4. When {{kib}} starts, it outputs a unique generated link to the terminal. To access {{kib}}, open this link in a web browser. -5. In your browser, enter the enrollment token that was generated when you started {{es}}. - - To regenerate the token, run: - - ```sh - docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana - ``` - -6. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. - - To regenerate the password, run: - - ```sh - docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic - ``` - - - -### Remove containers [remove-containers-docker] - -To remove the containers and their network, run: - -```sh -# Remove the Elastic network -docker network rm elastic - -# Remove {{es}} containers -docker rm es01 -docker rm es02 - -# Remove the {{kib}} container -docker rm kib01 -``` - - -### Next steps [_next_steps_5] - -You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, review the [requirements and recommendations](#docker-prod-prerequisites) to apply when running {{es}} in Docker in production. - - - -## Start a multi-node cluster with Docker Compose [docker-compose-file] - -Use Docker Compose to start a three-node {{es}} cluster with {{kib}}. Docker Compose lets you start multiple containers with a single command. - -### Configure and start the cluster [_configure_and_start_the_cluster] - -1. Install Docker Compose. Visit the [Docker Compose docs](https://docs.docker.com/compose/install/) to install Docker Compose for your environment. - - If you’re using Docker Desktop, Docker Compose is installed automatically. Make sure to allocate at least 4GB of memory to Docker Desktop. 
You can adjust memory usage in Docker Desktop by going to **Settings > Resources**. - -2. Create or navigate to an empty directory for the project. -3. Download and save the following files in the project directory: - - * [`.env`](https://github.com/elastic/elasticsearch/blob/master/docs/reference/setup/install/docker/.env) - * [`docker-compose.yml`](https://github.com/elastic/elasticsearch/blob/master/docs/reference/setup/install/docker/docker-compose.yml) - -4. In the `.env` file, specify a password for the `ELASTIC_PASSWORD` and `KIBANA_PASSWORD` variables. - - The passwords must be alphanumeric and can’t contain special characters, such as `!` or `@`. The bash script included in the `docker-compose.yml` file only works with alphanumeric characters. Example: - - ```txt - # Password for the 'elastic' user (at least 6 characters) - ELASTIC_PASSWORD=changeme - - # Password for the 'kibana_system' user (at least 6 characters) - KIBANA_PASSWORD=changeme - ... - ``` - -5. In the `.env` file, set `STACK_VERSION` to the current {{stack}} version. - - ```txt - ... - # Version of Elastic products - STACK_VERSION={{stack-version}} - ... - ``` - -6. By default, the Docker Compose configuration exposes port `9200` on all network interfaces. - - To avoid exposing port `9200` to external hosts, set `ES_PORT` to `127.0.0.1:9200` in the `.env` file. This ensures {{es}} is only accessible from the host machine. - - ```txt - ... - # Port to expose {{es}} HTTP API to the host - #ES_PORT=9200 - ES_PORT=127.0.0.1:9200 - ... - ``` - -7. To start the cluster, run the following command from the project directory. - - ```sh - docker-compose up -d - ``` - -8. After the cluster has started, open [http://localhost:5601](http://localhost:5601) in a web browser to access {{kib}}. -9. Log in to {{kib}} as the `elastic` user using the `ELASTIC_PASSWORD` you set earlier. - - -### Stop and remove the cluster [_stop_and_remove_the_cluster] - -To stop the cluster, run `docker-compose down`. 
The data in the Docker volumes is preserved and loaded when you restart the cluster with `docker-compose up`. - -```sh -docker-compose down -``` - -To delete the network, containers, and volumes when you stop the cluster, specify the `-v` option: - -```sh -docker-compose down -v -``` - - -### Next steps [_next_steps_6] - -You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, review the [requirements and recommendations](#docker-prod-prerequisites) to apply when running {{es}} in Docker in production. - - - -## Using the Docker images in production [docker-prod-prerequisites] - -The following requirements and recommendations apply when running {{es}} in Docker in production. - -### Set `vm.max_map_count` to at least `262144` [_set_vm_max_map_count_to_at_least_262144] - -The `vm.max_map_count` kernel setting must be set to at least `262144` for production use. - -How you set `vm.max_map_count` depends on your platform. - -#### Linux [_linux] - -To view the current value for the `vm.max_map_count` setting, run: - -```sh -grep vm.max_map_count /etc/sysctl.conf -vm.max_map_count=262144 -``` - -To apply the setting on a live system, run: - -```sh -sysctl -w vm.max_map_count=262144 -``` - -To permanently change the value for the `vm.max_map_count` setting, update the value in `/etc/sysctl.conf`. - - -#### macOS with [Docker for Mac](https://docs.docker.com/docker-for-mac) [_macos_with_docker_for_machttpsdocs_docker_comdocker_for_mac] - -The `vm.max_map_count` setting must be set within the xhyve virtual machine: - -1. From the command line, run: - - ```sh - screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty - ``` - -2. Press enter and use `sysctl` to configure `vm.max_map_count`: - - ```sh - sysctl -w vm.max_map_count=262144 - ``` - -3. To exit the `screen` session, type `Ctrl a d`. 
- - -#### Windows and macOS with [Docker Desktop](https://www.docker.com/products/docker-desktop) [_windows_and_macos_with_docker_desktophttpswww_docker_comproductsdocker_desktop] - -The `vm.max_map_count` setting must be set via docker-machine: - -```sh -docker-machine ssh -sudo sysctl -w vm.max_map_count=262144 -``` - - -#### Windows with [Docker Desktop WSL 2 backend](https://docs.docker.com/docker-for-windows/wsl) [_windows_with_docker_desktop_wsl_2_backendhttpsdocs_docker_comdocker_for_windowswsl] - -The `vm.max_map_count` setting must be set in the "docker-desktop" WSL instance before the {{es}} container will properly start. There are several ways to do this, depending on your version of Windows and your version of WSL. - -If you are on Windows 10 before version 22H2, or if you are on Windows 10 version 22H2 using the built-in version of WSL, you must either manually set it every time you restart Docker before starting your {{es}} container, or (if you do not wish to do so on every restart) you must globally set every WSL2 instance to have the `vm.max_map_count` changed. This is because these versions of WSL do not properly process the /etc/sysctl.conf file. - -To manually set it every time you reboot, you must run the following commands in a command prompt or PowerShell window every time you restart Docker: - -```sh -wsl -d docker-desktop -u root -sysctl -w vm.max_map_count=262144 -``` - -If you are on these versions of WSL and you do not want to have to run those commands every time you restart Docker, you can globally change every WSL distribution with this setting by modifying your %USERPROFILE%\.wslconfig as follows: - -```text -[wsl2] -kernelCommandLine = "sysctl.vm.max_map_count=262144" -``` - -This will cause all WSL2 VMs to have that setting assigned when they start. 
- -If you are on Windows 11, or Windows 10 version 22H2 and have installed the Microsoft Store version of WSL, you can modify the /etc/sysctl.conf within the "docker-desktop" WSL distribution, perhaps with commands like this: - -```sh -wsl -d docker-desktop -u root -vi /etc/sysctl.conf -``` - -and appending a line which reads: - -```text -vm.max_map_count = 262144 -``` - - - -### Configuration files must be readable by the `elasticsearch` user [_configuration_files_must_be_readable_by_the_elasticsearch_user] - -By default, {{es}} runs inside the container as user `elasticsearch` using uid:gid `1000:0`. - -::::{important} -One exception is [Openshift](https://docs.openshift.com/container-platform/3.6/creating_images/guidelines.md#openshift-specific-guidelines), which runs containers using an arbitrarily assigned user ID. Openshift presents persistent volumes with the gid set to `0`, which works without any adjustments. -:::: - - -If you are bind-mounting a local directory or file, it must be readable by the `elasticsearch` user. In addition, this user must have write access to the [config, data and log dirs](important-settings-configuration.md#path-settings) ({{es}} needs write access to the `config` directory so that it can generate a keystore). A good strategy is to grant group access to gid `0` for the local directory. - -For example, to prepare a local directory for storing data through a bind-mount: - -```sh -mkdir esdatadir -chmod g+rwx esdatadir -chgrp 0 esdatadir -``` - -You can also run an {{es}} container using both a custom UID and GID. You must ensure that file permissions will not prevent {{es}} from executing. You can use one of two options: - -* Bind-mount the `config`, `data` and `logs` directories. If you intend to install plugins and prefer not to [create a custom Docker image](#_c_customized_image), you must also bind-mount the `plugins` directory. -* Pass the `--group-add 0` command line option to `docker run`. 
This ensures that the user under which {{es}} is running is also a member of the `root` (GID 0) group inside the container. - - -### Increase ulimits for nofile and nproc [_increase_ulimits_for_nofile_and_nproc] - -Increased ulimits for [nofile](setting-system-settings.md) and [nproc](max-number-threads-check.md) must be available for the {{es}} containers. Verify the [init system](https://github.com/moby/moby/tree/ea4d1243953e6b652082305a9c3cda8656edab26/contrib/init) for the Docker daemon sets them to acceptable values. - -To check the Docker daemon defaults for ulimits, run: - -```sh -docker run --rm docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' -``` - -If needed, adjust them in the Daemon or override them per container. For example, when using `docker run`, set: - -```sh ---ulimit nofile=65535:65535 -``` - - -### Disable swapping [_disable_swapping] - -Swapping needs to be disabled for performance and node stability. For information about ways to do this, see [Disable swapping](setup-configuration-memory.md). - -If you opt for the `bootstrap.memory_lock: true` approach, you also need to define the `memlock: true` ulimit in the [Docker Daemon](https://docs.docker.com/engine/reference/commandline/dockerd/#default-ulimits), or explicitly set for the container as shown in the [sample compose file](#docker-compose-file). When using `docker run`, you can specify: - -```sh --e "bootstrap.memory_lock=true" --ulimit memlock=-1:-1 -``` - - -### Randomize published ports [_randomize_published_ports] - -The image [exposes](https://docs.docker.com/engine/reference/builder/#/expose) TCP ports 9200 and 9300. For production clusters, randomizing the published ports with `--publish-all` is recommended, unless you are pinning one container per host. 
- - -### Manually set the heap size [docker-set-heap-size] - -By default, {{es}} automatically sizes JVM heap based on a nodes’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and the total memory available to the node’s container. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting JVM heap size. - -To manually set the heap size in production, bind mount a [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options) file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired [heap size](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-heap-size) settings. - -For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` environment variable. For example, to use 1GB, use the following command. - -```sh -docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -``` - -The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in production. - - -### Pin deployments to a specific image version [_pin_deployments_to_a_specific_image_version] - -Pin your deployments to a specific version of the {{es}} Docker image. For example `docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}}`. - - -### Always bind data volumes [_always_bind_data_volumes] - -You should use a volume bound on `/usr/share/elasticsearch/data` for the following reasons: - -1. The data of your {{es}} node won’t be lost if the container is killed -2. {{es}} is I/O sensitive and the Docker storage driver is not ideal for fast I/O -3. 
It allows the use of advanced [Docker volume plugins](https://docs.docker.com/engine/extend/plugins/#volume-plugins) - - -### Avoid using `loop-lvm` mode [_avoid_using_loop_lvm_mode] - -If you are using the devicemapper storage driver, do not use the default `loop-lvm` mode. Configure docker-engine to use [direct-lvm](https://docs.docker.com/engine/userguide/storagedriver/device-mapper-driver/#configure-docker-with-devicemapper). - - -### Centralize your logs [_centralize_your_logs] - -Consider centralizing your logs by using a different [logging driver](https://docs.docker.com/engine/admin/logging/overview/). Also note that the default json-file logging driver is not ideally suited for production use. - - - -## Configuring {{es}} with Docker [docker-configuration-methods] - -When you run in Docker, the [{{es}} configuration files](configure-elasticsearch.md#config-files-location) are loaded from `/usr/share/elasticsearch/config/`. - -To use custom configuration files, you [bind-mount the files](#docker-config-bind-mount) over the configuration files in the image. - -You can set individual {{es}} configuration parameters using Docker environment variables. The [sample compose file](#docker-compose-file) and the [single-node example](#docker-cli-run-dev-mode) use this method. You can use the setting name directly as the environment variable name. If you cannot do this, for example because your orchestration platform forbids periods in environment variable names, then you can use an alternative style by converting the setting name as follows. - -1. Change the setting name to uppercase -2. Prefix it with `ES_SETTING_` -3. Escape any underscores (`_`) by duplicating them -4. Convert all periods (`.`) to underscores (`_`) - -For example, `-e bootstrap.memory_lock=true` becomes `-e ES_SETTING_BOOTSTRAP_MEMORY__LOCK=true`. 
- -You can use the contents of a file to set the value of the `ELASTIC_PASSWORD` or `KEYSTORE_PASSWORD` environment variables, by suffixing the environment variable name with `_FILE`. This is useful for passing secrets such as passwords to {{es}} without specifying them directly. - -For example, to set the {{es}} bootstrap password from a file, you can bind mount the file and set the `ELASTIC_PASSWORD_FILE` environment variable to the mount location. If you mount the password file to `/run/secrets/bootstrapPassword.txt`, specify: - -```sh --e ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt -``` - -You can override the default command for the image to pass {{es}} configuration parameters as command line options. For example: - -```sh -docker run bin/elasticsearch -Ecluster.name=mynewclustername -``` - -While bind-mounting your configuration files is usually the preferred method in production, you can also [create a custom Docker image](#_c_customized_image) that contains your configuration. - -### Mounting {{es}} configuration files [docker-config-bind-mount] - -Create custom config files and bind-mount them over the corresponding files in the Docker image. For example, to bind-mount `custom_elasticsearch.yml` with `docker run`, specify: - -```sh --v full_path_to/custom_elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -``` - -If you bind-mount a custom `elasticsearch.yml` file, ensure it includes the `network.host: 0.0.0.0` setting. This setting ensures the node is reachable for HTTP and transport traffic, provided its ports are exposed. The Docker image’s built-in `elasticsearch.yml` file includes this setting by default. - -::::{important} -The container **runs {{es}} as user `elasticsearch` using uid:gid `1000:0`**. Bind mounted host directories and files must be accessible by this user, and the data and log directories must be writable by this user. 
-:::: - - - -### Create an encrypted {{es}} keystore [docker-keystore-bind-mount] - -By default, {{es}} will auto-generate a keystore file for [secure settings](../../security/secure-settings.md). This file is obfuscated but not encrypted. - -To encrypt your secure settings with a password and have them persist outside the container, use a `docker run` command to manually create the keystore instead. The command must: - -* Bind-mount the `config` directory. The command will create an `elasticsearch.keystore` file in this directory. To avoid errors, do not directly bind-mount the `elasticsearch.keystore` file. -* Use the `elasticsearch-keystore` tool with the `create -p` option. You’ll be prompted to enter a password for the keystore. - -For example: - -```sh -docker run -it --rm \ --v full_path_to/config:/usr/share/elasticsearch/config \ -docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ -bin/elasticsearch-keystore create -p -``` - -You can also use a `docker run` command to add or update secure settings in the keystore. You’ll be prompted to enter the setting values. If the keystore is encrypted, you’ll also be prompted to enter the keystore password. - -```sh -docker run -it --rm \ --v full_path_to/config:/usr/share/elasticsearch/config \ -docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ -bin/elasticsearch-keystore \ -add my.secure.setting \ -my.other.secure.setting -``` - -If you’ve already created the keystore and don’t need to update it, you can bind-mount the `elasticsearch.keystore` file directly. You can use the `KEYSTORE_PASSWORD` environment variable to provide the keystore password to the container at startup. 
For example, a `docker run` command might have the following options: - -```sh --v full_path_to/config/elasticsearch.keystore:/usr/share/elasticsearch/config/elasticsearch.keystore --e KEYSTORE_PASSWORD=mypassword -``` - - -### Using custom Docker images [_c_customized_image] - -In some environments, it might make more sense to prepare a custom image that contains your configuration. A `Dockerfile` to achieve this might be as simple as: - -```sh -FROM docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/ -``` - -You could then build and run the image with: - -```sh -docker build --tag=elasticsearch-custom . -docker run -ti -v /usr/share/elasticsearch/data elasticsearch-custom -``` - -Some plugins require additional security permissions. You must explicitly accept them either by: - -* Attaching a `tty` when you run the Docker image and allowing the permissions when prompted. -* Inspecting the security permissions and accepting them (if appropriate) by adding the `--batch` flag to the plugin install command. - -See [Plugin management](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/_other_command_line_parameters.md) for more information. - - -### Troubleshoot Docker errors for {{es}} [troubleshoot-docker-errors] - -Here’s how to resolve common errors when running {{es}} with Docker. - - -### elasticsearch.keystore is a directory [_elasticsearch_keystore_is_a_directory] - -```txt -Exception in thread "main" org.elasticsearch.bootstrap.BootstrapException: java.io.IOException: Is a directory: SimpleFSIndexInput(path="/usr/share/elasticsearch/config/elasticsearch.keystore") Likely root cause: java.io.IOException: Is a directory -``` - -A [keystore-related](#docker-keystore-bind-mount) `docker run` command attempted to directly bind-mount an `elasticsearch.keystore` file that doesn’t exist. 
If you use the `-v` or `--volume` flag to mount a file that doesn’t exist, Docker instead creates a directory with the same name. - -To resolve this error: - -1. Delete the `elasticsearch.keystore` directory in the `config` directory. -2. Update the `-v` or `--volume` flag to point to the `config` directory path rather than the keystore file’s path. For an example, see [Create an encrypted {{es}} keystore](#docker-keystore-bind-mount). -3. Retry the command. - - -### elasticsearch.keystore: Device or resource busy [_elasticsearch_keystore_device_or_resource_busy] - -```txt -Exception in thread "main" java.nio.file.FileSystemException: /usr/share/elasticsearch/config/elasticsearch.keystore.tmp -> /usr/share/elasticsearch/config/elasticsearch.keystore: Device or resource busy -``` - -A `docker run` command attempted to [update the keystore](#docker-keystore-bind-mount) while directly bind-mounting the `elasticsearch.keystore` file. To update the keystore, the container requires access to other files in the `config` directory, such as `keystore.tmp`. - -To resolve this error: - -1. Update the `-v` or `--volume` flag to point to the `config` directory path rather than the keystore file’s path. For an example, see [Create an encrypted {{es}} keystore](#docker-keystore-bind-mount). -2. Retry the command. 
+* [](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md) +* [](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md) +* [](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md) +* [](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md) \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index aebdc6c6ff..41b851c314 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -9,6 +9,9 @@ sub: escape: "\\" stack-version: "9.0.0" navigation_title: "RPM" +applies_to: + deployment: + self: --- # Install {{es}} with RPM [rpm] @@ -25,15 +28,15 @@ RPM install is not supported on distributions with old versions of RPM, such as :::{include} _snippets/es-releases.md ::: -::::{note} -{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) -:::: +:::{include} _snippets/java-version.md +::: -::::{tip} -For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](installing-elasticsearch.md). 
-:::: +## Before you start -## Import the {{es}} PGP key [rpm-key] +:::{include} _snippets/prereqs.md +::: + +## Step 1: Import the {{es}} PGP key [rpm-key] :::{include} _snippets/pgp-key.md ::: @@ -42,11 +45,42 @@ For a step-by-step example of setting up the {{stack}} on your own premises, try rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch ``` -## Installing from the RPM repository [rpm-repo] +## Step 2: Install {{es}} + +You have several options for installing the {{es}} RPM package: + +* [From the RPM repository](#rpm-repo) +* [Manually](#install-rpm) + +### Install from the RPM repository [rpm-repo] Create a file called `elasticsearch.repo` in the `/etc/yum.repos.d/` directory for RedHat based distributions, or in the `/etc/zypp/repos.d/` directory for OpenSuSE based distributions, containing: -## Download and install the RPM manually [install-rpm] +```ini +[elasticsearch] +name=Elasticsearch repository for 9.x packages +baseurl=https://artifacts.elastic.co/packages/9.x/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=0 +autorefresh=1 +type=rpm-md +``` +And your repository is ready for use. You can now install {{es}} with one of the following commands: + +```sh +sudo yum install --enablerepo=elasticsearch elasticsearch <1> +sudo dnf install --enablerepo=elasticsearch elasticsearch <2> +sudo zypper modifyrepo --enable elasticsearch && \ + sudo zypper install elasticsearch; \ + sudo zypper modifyrepo --disable elasticsearch <3> +``` + +1. Use `yum` on CentOS and older Red Hat based distributions. +2. Use `dnf` on Fedora and other newer Red Hat distributions. +3. Use `zypper` on OpenSUSE based distributions.
+ +### Download and install the RPM manually [install-rpm] The RPM for {{es}} {{stack-version}} can be downloaded from the website and installed as follows: @@ -57,35 +91,49 @@ shasum -a 512 -c elasticsearch-{{stack-version}}-x86_64.rpm.sha512 <1> sudo rpm --install elasticsearch-{{stack-version}}-x86_64.rpm ``` -1. Compares the SHA of the downloaded RPM and the published checksum, which should output `elasticsearch-{{version}}-x86_64.rpm: OK`. +1. Compares the SHA of the downloaded RPM and the published checksum, which should output `elasticsearch-{{stack-version}}-x86_64.rpm: OK`. :::{include} _snippets/skip-set-kernel-params.md ::: -## Start {{es}} with security enabled [rpm-security-configuration] +## Step 3 (Optional): Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster_2] -:::{include} _snippets/auto-security-config.md +:::{include} _snippets/join-existing-cluster.md ::: -:::{include} _snippets/pw-env-var.md + +## Step 4: Enable automatic creation of system indices [rpm-enable-indices] + +:::{include} _snippets/enable-auto-indices.md ::: -### Reconfigure a node to join an existing cluster [_reconfigure_a_node_to_join_an_existing_cluster_2] +## Step 5: Run {{es}} with `systemd` [running-systemd] -:::{include} _snippets/join-existing-cluster.md +:::{include} _snippets/systemd.md ::: -## Enable automatic creation of system indices [rpm-enable-indices] +### Start {{es}} automatically -:::{include} _snippets/enable-auto-indices.md +:::{include} _snippets/systemd-startup.md ::: -## Running {{es}} with `systemd` [running-systemd] +### Log to the systemd journal -:::{include} _snippets/systemd.md +:::{include} _snippets/systemd-journal.md +::: + +:::{include} _snippets/systemd-startup-timeout.md +::: + +### Security at startup [rpm-security-configuration] + +:::{include} _snippets/auto-security-config.md +::: + +:::{include} _snippets/pw-env-var.md ::: -## Check that {{es}} is running [rpm-check-running] +## Step 6: Check that {{es}} is
running [rpm-check-running] :::{include} _snippets/check-es-running.md ::: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 8df9ca1bcf..2da5fe1b04 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -8,28 +8,34 @@ sub: escape: "^" stack-version: "9.0.0" navigation_title: Windows +applies_to: + deployment: + self: --- # Install {{es}} with .zip on Windows [zip-windows] -{{es}} can be installed on Windows using the Windows `.zip` archive. This comes with a `elasticsearch-service.bat` command which will setup {{es}} to run as a service. +{{es}} can be installed on Windows using the Windows `.zip` archive. This comes with an `elasticsearch-service.bat` command which will set up {{es}} to [run as a service](#windows-service). :::{include} _snippets/trial.md ::: -::::{note} -On Windows the {{es}} {{ml}} feature requires the Microsoft Universal C Runtime library. This is built into Windows 10, Windows Server 2016 and more recent versions of Windows. For older versions of Windows it can be installed via Windows Update, or from a [separate download](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows). If you cannot install the Microsoft Universal C Runtime library you can still use the rest of {{es}} if you disable the {{ml}} feature. -:::: - :::{include} _snippets/es-releases.md ::: +:::{include} _snippets/java-version.md +::: + ::::{note} -{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version) +On Windows, the {{es}} {{ml}} feature requires the Microsoft Universal C Runtime library.
This is built into Windows 10, Windows Server 2016 and more recent versions of Windows. For older versions of Windows, it can be installed through Windows Update, or from a [separate download](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows). If you can't install the Microsoft Universal C Runtime library, you can still use the rest of {{es}} if you disable the {{ml}} feature. :::: +## Before you start -## Download and install the `.zip` package [install-windows] +:::{include} _snippets/prereqs.md +::: + +## Step 1: Download and install the `.zip` package [install-windows] % link url manually set Download the `.zip` archive for {{es}} {{stack-version}} from: [https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-windows-x86_64.zip](https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-9.0.0-windows-x86_64.zip) @@ -40,17 +46,27 @@ Unzip it with your favorite unzip tool. This will create a folder called `elasti cd C:\Program Files\elasticsearch-{{stack-version}} ``` -## Enable automatic creation of system indices [windows-enable-indices] +## Step 2: Enable automatic creation of system indices [windows-enable-indices] :::{include} _snippets/enable-auto-indices.md ::: -## Run {{es}} from the command line [windows-running] +## Step 3: Run {{es}} + +You have several options for starting {{es}}: + +* [Run from the command line](#command-line) +* [Run the node to be enrolled in an existing cluster](#_enroll_nodes_in_an_existing_cluster_2) +* [Install and run as a service](#windows-service) + +These options are described in the following sections.
+ +### Run {{es}} from the command line [command-line] :::{include} _snippets/zip-windows-start.md ::: -### Security at startup [security-at-startup] +#### Security at startup [security-at-startup] :::{include} _snippets/auto-security-config.md ::: @@ -58,36 +74,17 @@ cd C:\Program Files\elasticsearch-{{stack-version}} :::{include} _snippets/pw-env-var.md ::: -### Enroll nodes in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] +#### Configure {{es}} on the command line [windows-configuring] -:::{include} _snippets/enroll-nodes.md +:::{include} _snippets/cmd-line-config.md ::: -## Configure {{es}} on the command line [windows-configuring] - -{{es}} loads its configuration from the `%ES_HOME%\config\elasticsearch.yml` file by default. The format of this config file is explained in [](configure-elasticsearch.md). - -Any settings that can be specified in the config file can also be specified on the command line, using the `-E` syntax as follows: - -```sh -.\bin\elasticsearch.bat -Ecluster.name=my_cluster -Enode.name=node_1 -``` +### Enroll the node in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] -::::{note} -Values that contain spaces must be surrounded with quotes. For instance `-Epath.logs="C:\My Logs\logs"`. -:::: - - -::::{tip} -Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings such as `node.name` could be specified on the command line. -:::: - -## Check that {{es}} is running [_check_that_elasticsearch_is_running_2] - -:::{include} _snippets/check-es-running.md +:::{include} _snippets/enroll-nodes.md ::: -## Install and run {{es}} as a service on Windows [windows-service] +### Install and run {{es}} as a service on Windows [windows-service] You can install {{es}} as a service that runs in the background or starts automatically at boot time without user interaction. 
@@ -100,6 +97,12 @@ You can install {{es}} as a service that runs in the background or starts automa The service 'elasticsearch-service-x64' has been installed. ``` + `ES_JAVA_HOME` is the installation directory of the desired JVM to run the service under. You can change this value using an [environment variable](#windows-service-settings). + + ::::{note} + While a JRE can be used for the {{es}} service, its use is discouraged and using a JRE will trigger a warning. Use is discouraged due to its use of a client VM, as opposed to a server JVM which offers better performance for long-running applications. + :::: + 2. Start {{es}} as a service. When {{es}} starts, authentication is enabled by default: ```sh @@ -116,106 +119,85 @@ You can install {{es}} as a service that runs in the background or starts automa C:\Program Files\elasticsearch-{{stack-version}}\bin>\bin\elasticsearch-reset-password -u elastic ``` - -::::{note} -While a JRE can be used for the {{es}} service, due to its use of a client VM (as opposed to a server JVM which offers better performance for long-running applications) its usage is discouraged and a warning will be issued. -:::: - - -::::{note} -The system environment variable `ES_JAVA_HOME` should be set to the path of the JDK installation that you want the service to use. If you upgrade the JDK, you are not required to the reinstall the service but you must set the value of the system environment variable `ES_JAVA_HOME` to the path to the new JDK installation. However, upgrading across JVM types (e.g. JRE versus SE) is not supported, and does require the service to be reinstalled. -:::: - - -### Manage {{es}} as a service on Windows [windows-service-manage] +#### Manage {{es}} as a service on Windows [windows-service-manage] Run the `elasticsearch-service.bat` script in the `bin\` folder to install, remove, manage, or configure the service and potentially start and stop the service from the command line. 
```sh C:\Program Files\elasticsearch-{{stack-version}}\bin>elasticsearch-service.bat +``` -Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] +Usage: +``` +elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] ``` -The script requires one parameter (the command to execute), followed by an optional one indicating the service id (useful when installing multiple {{es}} services). +The script requires one parameter (the command to execute), followed by an optional one indicating the service ID (useful when installing multiple {{es}} services). The commands available are: -`install` -: Install {{es}} as a service - -`remove` -: Remove the installed {{es}} service (and stop the service if started) - -`start` -: Start the {{es}} service (if installed) - -`stop` -: Stop the {{es}} service (if started) - -`manager` -: Start a GUI for managing the installed service - - -## Customize service settings [windows-service-settings] - -The {{es}} service can be configured prior to installation by setting the following environment variables (either using the [set command](https://technet.microsoft.com/en-us/library/cc754250(v=ws.10).aspx) from the command line, or through the **System Properties→Environment Variables** GUI). - -`SERVICE_ID` -: A unique identifier for the service. Useful if installing multiple instances on the same machine. Defaults to `elasticsearch-service-x64`. +| Command | Description | +| --- | --- | +| +| `install` | Install {{es}} as a service | +| `remove` | Remove the installed {{es}} service (and stop the service if started) | +| `start` | Start the {{es}} service (if installed) | +| `stop` | Stop the {{es}} service (if started) | +| `manager` | Start a GUI for managing the installed service | -`SERVICE_USERNAME` -: The user to run as, defaults to the local system account. -`SERVICE_PASSWORD` -: The password for the user specified in `%SERVICE_USERNAME%`. 
+#### Customize service settings [windows-service-settings] -`SERVICE_DISPLAY_NAME` -: The name of the service. Defaults to `{{es}} %SERVICE_ID%`. +You can customize the service settings before installation using environment variables, or after installation using the Manager GUI. -`SERVICE_DESCRIPTION` -: The description of the service. Defaults to `{{es}} Windows Service - https://elastic.co`. +`elasticsearch-service.bat` relies on [Apache Commons Daemon](https://commons.apache.org/proper/commons-daemon/) project to install the service. Environment variables set prior to the service installation are copied and will be used during the service lifecycle. This means any changes made to them after the installation will not be picked up unless the service is reinstalled. -`ES_JAVA_HOME` -: The installation directory of the desired JVM to run the service under. +::::{tab-set} +:::{tab-item} Environment variables (pre-install) -`SERVICE_LOG_DIR` -: Service log directory, defaults to `%ES_HOME%\logs`. Note that this does not control the path for the {{es}} logs; the path for these is set via the setting `path.logs` in the `elasticsearch.yml` configuration file, or on the command line. +The {{es}} service can be configured prior to installation by setting the following environment variables (either using the [set command](https://technet.microsoft.com/en-us/library/cc754250(v=ws.10).aspx) from the command line, or through the **System Properties > Environment Variables** GUI). -`ES_PATH_CONF` -: Configuration file directory (which needs to include `elasticsearch.yml`, `jvm.options`, and `log4j2.properties` files), defaults to `%ES_HOME%\config`. +| Environment variable | Description | +| --- | --- | +| `SERVICE_ID` | A unique identifier for the service. Useful if installing multiple instances on the same machine. Defaults to `elasticsearch-service-x64`. | +| `SERVICE_USERNAME` | The user to run as, defaults to the local system account. 
| +| `SERVICE_PASSWORD` | The password for the user specified in `%SERVICE_USERNAME%`. | +| `SERVICE_DISPLAY_NAME` | The name of the service. Defaults to `{{es}} %SERVICE_ID%`. | +| `SERVICE_DESCRIPTION` | The description of the service. Defaults to `{{es}} Windows Service - https://elastic.co`. | +| `ES_JAVA_HOME` | The installation directory of the desired JVM to run the service under. | +| `SERVICE_LOG_DIR` | Service log directory, defaults to `%ES_HOME%\logs`. Note that this does not control the path for the {{es}} logs; the path for these is set via the setting `path.logs` in the `elasticsearch.yml` configuration file, or on the command line. | +| `ES_PATH_CONF` | Configuration file directory (which needs to include `elasticsearch.yml`, `jvm.options`, and `log4j2.properties` files), defaults to `%ES_HOME%\config`. | +| `ES_JAVA_OPTS` | Any additional JVM system properties you may want to apply. | +| `ES_START_TYPE` | Startup mode for the service. Can be either `auto` or `manual` (default). | +| `ES_STOP_TIMEOUT` | The timeout in seconds that procrun waits for service to exit gracefully. Defaults to `0`. | -`ES_JAVA_OPTS` -: Any additional JVM system properties you may want to apply. +::: +:::{tab-item} Manager GUI (post-install) -`ES_START_TYPE` -: Startup mode for the service. Can be either `auto` or `manual` (default). +It is also possible to configure the service after it’s been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. To open the manager GUI, run the following command: -`ES_STOP_TIMEOUT` -: The timeout in seconds that procrun waits for service to exit gracefully. Defaults to `0`. +```sh +elasticsearch-service.bat manager +``` -::::{note} -At its core, `elasticsearch-service.bat` relies on [Apache Commons Daemon](https://commons.apache.org/proper/commons-daemon/) project to install the service. 
Environment variables set prior to the service installation are copied and will be used during the service lifecycle. This means any changes made to them after the installation will not be picked up unless the service is reinstalled. +Most changes (like JVM settings) made through the manager GUI will require a restart of the service to take effect. +::: :::: +##### Considerations -::::{note} -By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting the heap size. - -When installing {{es}} on Windows as a service for the first time or running {{es}} from the command line, you can manually [Set the JVM heap size](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-heap-size). To resize the heap for an already installed service, use the service manager: `bin\elasticsearch-service.bat manager`. - -:::: - +* By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting the heap size. + + When installing {{es}} on Windows as a service for the first time or running {{es}} from the command line, you can manually [Set the JVM heap size](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-heap-size). To resize the heap for an already installed service, use the manager GUI. -::::{note} -The service automatically configures a private temporary directory for use by {{es}} when it is running.
This private temporary directory is configured as a sub-directory of the private temporary directory for the user running the installation. If the service will run under a different user, you can configure the location of the temporary directory that the service should use by setting the environment variable `ES_TMPDIR` to the preferred location before you execute the service installation. -:::: +* The service automatically configures a private temporary directory for use by {{es}} when it is running. This private temporary directory is configured as a sub-directory of the private temporary directory for the user running the installation. If the service will run under a different user, you can configure the location of the temporary directory that the service should use by setting the environment variable `ES_TMPDIR` to the preferred location before you execute the service installation. +* The system environment variable `ES_JAVA_HOME` should be set to the path of the JDK installation that you want the service to use. If you upgrade the JDK, you are not required to reinstall the service, but you must set the value of the system environment variable `ES_JAVA_HOME` to the path to the new JDK installation. Upgrading across JVM types (e.g. JRE versus SE) is not supported, and requires the service to be reinstalled. -Using the Manager GUI -: It is also possible to configure the service after it’s been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Invoke `elasticsearch-service.bat manager` from the command-line to open the manager window. +## Step 4: Check that {{es}} is running [_check_that_elasticsearch_is_running_2] -Most changes (like JVM settings) made through the manager GUI will require a restart of the service to take affect.
+:::{include} _snippets/check-es-running.md +::: ## Connect clients to {{es}} [_connect_clients_to_es_4] diff --git a/deploy-manage/deploy/self-managed/setting-system-settings.md b/deploy-manage/deploy/self-managed/setting-system-settings.md index 257e56915e..6df1f96f2c 100644 --- a/deploy-manage/deploy/self-managed/setting-system-settings.md +++ b/deploy-manage/deploy/self-managed/setting-system-settings.md @@ -48,7 +48,7 @@ elasticsearch - nofile 65535 This change will only take effect the next time the `elasticsearch` user opens a new session. -::::{admonition} Ubuntu and `limits.conf` +::::{admonition} Ubuntu and limits.conf :class: note Ubuntu ignores the `limits.conf` file for processes started by `init.d`. To enable the `limits.conf` file, edit `/etc/pam.d/su` and uncomment the following line: diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md index bc9dc901af..600e5fa572 100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md @@ -1,106 +1,66 @@ --- mapped_urls: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/starting-elasticsearch.html - - https://www.elastic.co/guide/en/elasticsearch/reference/current/stopping-elasticsearch.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/starting-elasticsearch.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/stopping-elasticsearch.html applies_to: deployment: self: --- -# Start and stop Elasticsearch +# Start and stop {{es}} -Understanding how to properly start and stop Elasticsearch is essential for maintaining a stable and efficient cluster.
This guide outlines the recommended methods for starting and stopping {{es}} safely, considering the different installation types, including package-based installations, Docker containers, and manually extracted archives. +Understanding how to properly start and stop {{es}} is essential for maintaining a stable and efficient cluster. This guide outlines the recommended methods for starting and stopping {{es}} safely, considering the different installation types, including package-based installations, Docker containers, and manually extracted archives. -## Starting Elasticsearch [starting-elasticsearch] +## Starting {{es}} [starting-elasticsearch] The method for starting {{es}} varies depending on how you installed it. ### Archive packages (`.tar.gz`) [start-targz] -If you installed {{es}} with a `.tar.gz` package, you can start {{es}} from the command line. +If you installed {{es}} on Linux or macOS with a `.tar.gz` package, you can start {{es}} from the command line. #### Run {{es}} from the command line [_run_es_from_the_command_line] :::{include} /deploy-manage/deploy/self-managed/_snippets/targz-start.md ::: -If you're starting {{es}} for the first time, then {{es}} also enables and configures security. [Learn more](/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md#security-at-startup). +If you're starting {{es}} for the first time, then {{es}} also enables and configures security. [Learn more](/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md#security-at-startup). #### Run as a daemon [_run_as_a_daemon] -To run Elasticsearch as a daemon, specify `-d` on the command line, and record the process ID in a file using the `-p` option: - -```sh -./bin/elasticsearch -d -p pid -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. Refer to [Secure settings](../../../deploy-manage/security/secure-settings.md) for more details.
- -Log messages can be found in the `$ES_HOME/logs/` directory. - -To shut down Elasticsearch, kill the process ID recorded in the `pid` file: - -```sh -pkill -F pid -``` - -::::{note} -The {{es}} `.tar.gz` package does not include the `systemd` module. To manage {{es}} as a service, use the [Debian](../../../deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md#start-deb) or [RPM](../../../deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md#start-rpm) package instead. -:::: +:::{include} /deploy-manage/deploy/self-managed/_snippets/targz-daemon.md +::: ### Archive packages (`.zip`) [start-zip] -If you installed {{es}} on Windows with a `.zip` package, you can start {{es}} from the command line. If you want {{es}} to start automatically at boot time without any user interaction, [install {{es}} as a service](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md#windows-service). - -#### Run {{es}} from the command line [_run_es_from_the_command_line_2] - -Run the following command to start {{es}} from the command line: - -```sh -.\bin\elasticsearch.bat -``` - -When starting {{es}} for the first time, security features are enabled and configured by default. The following security configuration occurs automatically: +If you installed {{es}} on Windows with a `.zip` package, you can start {{es}} from the command line. If you want {{es}} to start automatically at boot time without any user interaction, [install {{es}} as a service](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md#windows-service). -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. -* An enrollment token is generated for {{kib}}, which is valid for 30 minutes.
- -The password for the `elastic` user and the enrollment token for {{kib}} are output to your terminal. - -We recommend storing the `elastic` password as an environment variable in your shell. Example: - -```sh -$ELASTIC_PASSWORD = "your_password" -``` - -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../../deploy-manage/security/secure-settings.md) for more details. - -By default {{es}} prints its logs to the console (`STDOUT`) and to the `.log` file within the [logs directory](../../../deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. +:::{include} /deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md +::: -To stop {{es}}, press `Ctrl-C`. +If you're starting {{es}} for the first time, then {{es}} also enables and configures security. [Learn more](/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md#security-at-startup). 
### Debian packages [start-deb] -#### Running Elasticsearch with `systemd` [start-es-deb-systemd] +#### Running {{es}} with `systemd` [start-es-deb-systemd] -To configure Elasticsearch to start automatically when the system boots up, run the following commands: +To configure {{es}} to start automatically when the system boots up, run the following commands: ```sh sudo /bin/systemctl daemon-reload sudo /bin/systemctl enable elasticsearch.service ``` -Elasticsearch can be started and stopped as follows: +{{es}} can be started and stopped as follows: ```sh sudo systemctl start elasticsearch.service sudo systemctl stop elasticsearch.service ``` -These commands provide no feedback as to whether Elasticsearch was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. +These commands provide no feedback as to whether {{es}} was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. -If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once Elasticsearch is up and running. +If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once {{es}} is up and running. ```sh echo "keystore_password" > /path/to/my_pwd_file.tmp @@ -109,7 +69,7 @@ sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file. sudo systemctl start elasticsearch.service ```
To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. +By default the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. When `systemd` logging is enabled, the logging information are available using the `journalctl` commands: @@ -119,13 +79,13 @@ To tail the journal: sudo journalctl -f ``` -To list journal entries for the elasticsearch service: +To list journal entries for the {{es}} service: ```sh sudo journalctl --unit elasticsearch ``` -To list journal entries for the elasticsearch service starting from a given time: +To list journal entries for the {{es}} service starting from a given time: ```sh sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" @@ -149,11 +109,11 @@ Versions of `systemd` prior to 238 do not support the timeout extension mechanis However the `systemd` logs will report that the startup timed out: ```text -Jan 31 01:22:30 debian systemd[1]: Starting Elasticsearch... +Jan 31 01:22:30 debian systemd[1]: Starting elasticsearch... Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. -Jan 31 01:37:15 debian systemd[1]: Failed to start Elasticsearch. +Jan 31 01:37:15 debian systemd[1]: Failed to start elasticsearch. ``` To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. @@ -162,29 +122,29 @@ To avoid this, upgrade your `systemd` to at least version 238. 
You can also temp ### Docker images [start-docker] -If you installed a Docker image, you can start {{es}} from the command line. There are different methods depending on whether you’re using development mode or production mode. See [Run {{es}} in Docker](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md#docker-cli-run-dev-mode). +If you installed a Docker image, you can start {{es}} from the command line. There are different methods depending on whether you’re using development mode or production mode. See [Run {{es}} in Docker](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md#docker-cli-run-dev-mode). ### RPM packages [start-rpm] -#### Running Elasticsearch with `systemd` [start-es-rpm-systemd] +#### Running {{es}} with `systemd` [start-es-rpm-systemd] -To configure Elasticsearch to start automatically when the system boots up, run the following commands: +To configure {{es}} to start automatically when the system boots up, run the following commands: ```sh sudo /bin/systemctl daemon-reload sudo /bin/systemctl enable elasticsearch.service ``` -Elasticsearch can be started and stopped as follows: +{{es}} can be started and stopped as follows: ```sh sudo systemctl start elasticsearch.service sudo systemctl stop elasticsearch.service ``` -These commands provide no feedback as to whether Elasticsearch was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. +These commands provide no feedback as to whether {{es}} was started successfully or not. Instead, this information will be written in the log files located in `/var/log/elasticsearch/`. -If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once Elasticsearch is up and running.
+If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once {{es}} is up and running. ```sh echo "keystore_password" > /path/to/my_pwd_file.tmp @@ -193,7 +153,7 @@ sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file. sudo systemctl start elasticsearch.service ``` -By default the Elasticsearch service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. +By default the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. When `systemd` logging is enabled, the logging information are available using the `journalctl` commands: @@ -203,13 +163,13 @@ To tail the journal: sudo journalctl -f ``` -To list journal entries for the elasticsearch service: +To list journal entries for the {{es}} service: ```sh sudo journalctl --unit elasticsearch ``` -To list journal entries for the elasticsearch service starting from a given time: +To list journal entries for the {{es}} service starting from a given time: ```sh sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" @@ -233,31 +193,31 @@ Versions of `systemd` prior to 238 do not support the timeout extension mechanis However the `systemd` logs will report that the startup timed out: ```text -Jan 31 01:22:30 debian systemd[1]: Starting Elasticsearch... +Jan 31 01:22:30 debian systemd[1]: Starting elasticsearch... Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. 
Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. -Jan 31 01:37:15 debian systemd[1]: Failed to start Elasticsearch. +Jan 31 01:37:15 debian systemd[1]: Failed to start elasticsearch. ``` To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. :::: -## Stopping Elasticsearch [stopping-elasticsearch] +## Stopping {{es}} [stopping-elasticsearch] -An orderly shutdown of Elasticsearch ensures that Elasticsearch has a chance to cleanup and close outstanding resources. For example, a node that is shutdown in an orderly fashion will remove itself from the cluster, sync translogs to disk, and perform other related cleanup activities. You can help ensure an orderly shutdown by properly stopping Elasticsearch. +An orderly shutdown of {{es}} ensures that {{es}} has a chance to cleanup and close outstanding resources. For example, a node that is shutdown in an orderly fashion will remove itself from the cluster, sync translogs to disk, and perform other related cleanup activities. You can help ensure an orderly shutdown by properly stopping {{es}}. -If you’re running Elasticsearch as a service, you can stop Elasticsearch via the service management functionality provided by your installation. +If you’re running {{es}} as a service, you can stop {{es}} via the service management functionality provided by your installation. -If you’re running Elasticsearch directly, you can stop Elasticsearch by sending control-C if you’re running Elasticsearch in the console, or by sending `SIGTERM` to the Elasticsearch process on a POSIX system. 
You can obtain the PID to send the signal to via various tools (for example, `ps` or `jps`): +If you’re running {{es}} directly, you can stop {{es}} by sending `Ctrl`+`C` if you’re running {{es}} in the console, or by sending `SIGTERM` to the {{es}} process on a POSIX system. You can obtain the PID to send the signal to via various tools (for example, `ps` or `jps`): ```sh -$ jps | grep Elasticsearch -14542 Elasticsearch +$ jps | grep Elasticsearch +14542 Elasticsearch ``` -From the Elasticsearch startup logs: +From the {{es}} startup logs: ```sh [2016-07-07 12:26:18,908][INFO ][node ] [I8hydUG] version[5.0.0-alpha4], pid[15399], build[3f5b994/2016-06-27T16:23:46.861Z], OS[Mac OS X/10.11.5/x86_64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_92/25.92-b14] @@ -266,47 +226,28 @@ From the {{es}} startup logs: Or by specifying a location to write a PID file to on startup (`-p `): ```sh -$ ./bin/elasticsearch -p /tmp/elasticsearch-pid -d -$ cat /tmp/elasticsearch-pid && echo +$ ./bin/elasticsearch -p /tmp/elasticsearch-pid -d +$ cat /tmp/elasticsearch-pid && echo 15516 $ kill -SIGTERM 15516 ``` -### Stopping on Fatal Errors [fatal-errors] - -During the life of the Elasticsearch virtual machine, certain fatal errors could arise that put the virtual machine in a questionable state. Such fatal errors include out of memory errors, internal errors in virtual machine, and serious I/O errors. - -When Elasticsearch detects that the virtual machine has encountered such a fatal error Elasticsearch will attempt to log the error and then will halt the virtual machine. When Elasticsearch initiates such a shutdown, it does not go through an orderly shutdown as described above. The Elasticsearch process will also return with a special status code indicating the nature of the error.
- -Killed by jvmkiller agent -: 158 - -User or kernel SIGTERM -: 143 - -Slain by kernel oom-killer -: 137 - -Segmentation fault -: 134 - -JVM internal error -: 128 - -Out of memory error -: 127 - -Stack overflow error -: 126 - -Unknown virtual machine error -: 125 +### Stopping on fatal errors [fatal-errors] -Serious I/O error -: 124 +During the life of the {{es}} virtual machine, certain fatal errors could arise that put the virtual machine in a questionable state. Such fatal errors include out of memory errors, internal errors in virtual machine, and serious I/O errors. -Bootstrap check failure -: 78 +When {{es}} detects that the virtual machine has encountered such a fatal error {{es}} will attempt to log the error and then will halt the virtual machine. When {{es}} initiates such a shutdown, it does not go through an orderly shutdown as described above. The {{es}} process will also return with a special status code indicating the nature of the error. -Unknown fatal error -: 1 +| Status code | Error | +| --- | --- | +| 1 | Unknown fatal error | +| 78 | Bootstrap check failure | +| 124 | Serious I/O error | +| 125 | Unknown virtual machine error | +| 126 | Stack overflow error | +| 127 | Out of memory error | +| 128 | JVM internal error | +| 134 | Segmentation fault | +| 137 | Slain by kernel oom-killer | +| 143 | User or kernel SIGTERM | +| 158 | Killed by jvmkiller agent | diff --git a/deploy-manage/toc.yml b/deploy-manage/toc.yml index de28479b23..7beaed6e2c 100644 --- a/deploy-manage/toc.yml +++ b/deploy-manage/toc.yml @@ -345,6 +345,11 @@ toc: - file: deploy/self-managed/install-elasticsearch-with-debian-package.md - file: deploy/self-managed/install-elasticsearch-with-rpm.md - file: deploy/self-managed/install-elasticsearch-with-docker.md + children: + - file: deploy/self-managed/install-elasticsearch-docker-basic.md + - file: deploy/self-managed/install-elasticsearch-docker-compose.md + - file: deploy/self-managed/install-elasticsearch-docker-prod.md + - 
file: deploy/self-managed/install-elasticsearch-docker-configure.md - file: deploy/self-managed/local-development-installation-quickstart.md - file: deploy/self-managed/bootstrap-checks.md - file: deploy/self-managed/configure-elasticsearch.md diff --git a/solutions/search/run-elasticsearch-locally.md b/solutions/search/run-elasticsearch-locally.md index 8d644576eb..dc369f59ab 100644 --- a/solutions/search/run-elasticsearch-locally.md +++ b/solutions/search/run-elasticsearch-locally.md @@ -8,51 +8,5 @@ applies_to: # Run {{es}} locally [run-elasticsearch-locally] -::::{warning} -**DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS** - -The instructions on this page are for **local development only**. Do not use this configuration for production deployments, because it is not secure. Refer to [deployment options](../../get-started/deployment-options.md) for a list of production deployment options. - -:::: - -Quickly set up {{es}} and {{kib}} in Docker for local development or testing, using the [`start-local` script](https://github.com/elastic/start-local?tab=readme-ov-file#-try-elasticsearch-and-kibana-locally). - -This setup comes with a one-month trial license that includes all Elastic features. After the trial period, the license reverts to **Free and open - Basic**. Refer to [Elastic subscriptions](https://www.elastic.co/subscriptions) for more information. - -## Prerequisites [local-dev-prerequisites] - -* If you don’t have Docker installed, [download and install Docker Desktop](https://www.docker.com/products/docker-desktop) for your operating system. -* If you’re using Microsoft Windows, then install [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install). 
- -## Run `start-local` script [local-dev-quick-start] - -To set up {{es}} and {{kib}} locally, run the `start-local` script: - -```sh -curl -fsSL https://elastic.co/start-local | sh -``` - -This script creates an `elastic-start-local` folder containing configuration files and starts both {{es}} and {{kib}} using Docker. - -After running the script, you can access Elastic services at the following endpoints: - -* **{{es}}**: [http://localhost:9200](http://localhost:9200) -* **{{kib}}**: [http://localhost:5601](http://localhost:5601) - -The script generates a random password for the `elastic` user, and an API key, stored in the `.env` file. - -::::{warning} -This setup is for local testing only. HTTPS is disabled, and Basic authentication is used for {{es}}. For security, {{es}} and {{kib}} are accessible only through `localhost`. - -:::: - - - -## Learn more [local-dev-additional-info] - -For more detailed information about the `start-local` setup, refer to the [README on GitHub](https://github.com/elastic/start-local). Learn about customizing the setup, logging, and more. - - -## Next steps [local-dev-next-steps] - -Use our [quick start guides](https://www.elastic.co/guide/en/elasticsearch/reference/current/quickstart.html) to learn the basics of {{es}}. 
+:::{include} /deploy-manage/deploy/self-managed/_snippets/start-local.md +::: From 25a613ef885a90b62109925ebb15f78ec5fca329 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Wed, 5 Mar 2025 17:29:11 -0500 Subject: [PATCH 07/43] more --- .../install-elasticsearch-docker-basic.md | 2 +- .../install-elasticsearch-docker-configure.md | 10 +++++----- .../install-elasticsearch-docker-prod.md | 12 ++++++++---- .../install-elasticsearch-with-docker.md | 2 +- .../deploy/self-managed/installing-elasticsearch.md | 2 +- 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md index 5b1f8271d8..0778202785 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md @@ -12,7 +12,7 @@ navigation_title: Single-node cluster Use Docker commands to start a single-node {{es}} cluster for development or testing. You can then run additional Docker commands to add nodes to the test cluster or run {{kib}}. ::::{tip} -* If you just want to test {{es}} in local development, refer to [Run {{es}} locally](../../../solutions/search/get-started.md). Please note that this setup is not suitable for production environments. +* If you just want to test {{es}} in local development, refer to [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). Please note that this setup is not suitable for production environments. * This setup doesn’t run multiple {{es}} nodes or {{kib}} by default. To create a multi-node cluster with {{kib}}, use Docker Compose instead. See [Start a multi-node cluster with Docker Compose](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md). 
:::: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md index e2823664d5..6d715c5e8f 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md @@ -13,7 +13,7 @@ When you run in Docker, the [{{es}} configuration files](configure-elasticsearch To use custom configuration files, you [bind-mount the files](#docker-config-bind-mount) over the configuration files in the image. -You can set individual {{es}} configuration parameters using Docker environment variables. The [sample compose file](#docker-compose-file) and the [single-node example](#docker-cli-run-dev-mode) use this method. You can use the setting name directly as the environment variable name. If you cannot do this, for example because your orchestration platform forbids periods in environment variable names, then you can use an alternative style by converting the setting name as follows. +You can set individual {{es}} configuration parameters using Docker environment variables. The [sample compose file](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md) and the [single-node example](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md) use this method. You can use the setting name directly as the environment variable name. If you can't do this, for example because your orchestration platform forbids periods in environment variable names, then you can use an alternative style by converting the setting name as follows: 1. Change the setting name to uppercase 2. 
Prefix it with `ES_SETTING_` @@ -38,7 +38,7 @@ docker run bin/elasticsearch -Ecluster.name=mynewclusternam While bind-mounting your configuration files is usually the preferred method in production, you can also [create a custom Docker image](#_c_customized_image) that contains your configuration. -### Mounting {{es}} configuration files [docker-config-bind-mount] +## Mounting {{es}} configuration files [docker-config-bind-mount] Create custom config files and bind-mount them over the corresponding files in the Docker image. For example, to bind-mount `custom_elasticsearch.yml` with `docker run`, specify: @@ -54,7 +54,7 @@ The container **runs {{es}} as user `elasticsearch` using uid:gid `1000:0`**. Bi -### Create an encrypted {{es}} keystore [docker-keystore-bind-mount] +## Create an encrypted {{es}} keystore [docker-keystore-bind-mount] By default, {{es}} will auto-generate a keystore file for [secure settings](../../security/secure-settings.md). This file is obfuscated but not encrypted. @@ -91,7 +91,7 @@ If you’ve already created the keystore and don’t need to update it, you can ``` -### Using custom Docker images [_c_customized_image] +## Using custom Docker images [_c_customized_image] In some environments, it might make more sense to prepare a custom image that contains your configuration. A `Dockerfile` to achieve this might be as simple as: @@ -115,7 +115,7 @@ Some plugins require additional security permissions. You must explicitly accept See [Plugin management](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/_other_command_line_parameters.md) for more information. -### Troubleshoot Docker errors for {{es}} [troubleshoot-docker-errors] +## Troubleshoot Docker errors for {{es}} [troubleshoot-docker-errors] Here’s how to resolve common errors when running {{es}} with Docker. 
diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md index c66513eeb0..dc52aa5023 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md @@ -177,16 +177,20 @@ The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend ## Pin deployments to a specific image version [_pin_deployments_to_a_specific_image_version] -Pin your deployments to a specific version of the {{es}} Docker image. For example `docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}}`. +Pin your deployments to a specific version of the {{es}} Docker image. For example: + +```sh +docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} +``` ## Always bind data volumes [_always_bind_data_volumes] You should use a volume bound on `/usr/share/elasticsearch/data` for the following reasons: -1. The data of your {{es}} node won’t be lost if the container is killed -2. {{es}} is I/O sensitive and the Docker storage driver is not ideal for fast I/O -3. It allows the use of advanced [Docker volume plugins](https://docs.docker.com/engine/extend/plugins/#volume-plugins) +1. The data of your {{es}} node won’t be lost if the container is killed. +2. {{es}} is I/O sensitive and the Docker storage driver is not ideal for fast I/O. +3. It allows the use of advanced [Docker volume plugins](https://docs.docker.com/engine/extend/plugins/#volume-plugins). 
## Avoid using `loop-lvm` mode [_avoid_using_loop_lvm_mode] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 5bc1252a62..37b1e7beb2 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -17,7 +17,7 @@ Docker images for {{es}} are available from the Elastic Docker registry. A list ::: ::::{tip} -If you just want to test {{es}} in local development, refer to [Run {{es}} locally](../../../solutions/search/get-started.md). Please note that this setup is not suitable for production environments. +If you just want to test {{es}} in local development, refer to [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). Please note that this setup is not suitable for production environments. :::: Review the following guides to install {{es}} with Docker: diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index 03c4c7e1aa..a72b1e77b8 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -68,7 +68,7 @@ If you want to install and manage {{es}} yourself, you can: * Run {{es}} in a [Docker container](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). ::::{tip} -To try out on your own machine, we recommend using Docker and running both {{es}} and Kibana. For more information, see [Run {{es}} locally](../../../solutions/search/get-started.md). This setup is not suitable for production use. +To try out on your own machine, we recommend using Docker and running both {{es}} and Kibana. 
For more information, see [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). This setup is not suitable for production use. :::: ## {{es}} install packages [elasticsearch-install-packages] From 3127d420cc7bfadbb345cefba90af48a2b41c681 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Wed, 5 Mar 2025 17:59:20 -0500 Subject: [PATCH 08/43] errors batch --- .../deploy/self-managed/_snippets/auto-security-config.md | 4 ++-- .../deploy/self-managed/_snippets/install-next-steps.md | 6 +++--- .../deploy/self-managed/_snippets/java-version.md | 2 +- deploy-manage/deploy/self-managed/_snippets/pw-env-var.md | 2 +- .../deploy/self-managed/_snippets/security-files.md | 2 +- .../deploy/self-managed/_snippets/start-local.md | 2 +- .../deploy/self-managed/_snippets/targz-daemon.md | 4 ++-- .../deploy/self-managed/_snippets/targz-start.md | 2 +- .../deploy/self-managed/_snippets/zip-windows-start.md | 2 +- deploy-manage/deploy/self-managed/bootstrap-checks.md | 4 ++-- deploy-manage/deploy/self-managed/deploy-cluster.md | 2 +- .../deploy/self-managed/executable-jna-tmpdir.md | 2 +- .../self-managed/important-settings-configuration.md | 4 ++-- .../self-managed/install-elasticsearch-docker-compose.md | 2 +- .../install-elasticsearch-docker-configure.md | 6 +++--- .../self-managed/install-elasticsearch-docker-prod.md | 8 ++++---- .../install-elasticsearch-from-archive-on-linux-macos.md | 2 +- .../install-elasticsearch-with-debian-package.md | 2 +- .../deploy/self-managed/install-elasticsearch-with-rpm.md | 4 ++-- .../install-elasticsearch-with-zip-on-windows.md | 4 ++-- deploy-manage/deploy/self-managed/install-on-windows.md | 2 ++ .../deploy/self-managed/install-with-debian-package.md | 2 ++ deploy-manage/deploy/self-managed/install-with-docker.md | 6 +++--- deploy-manage/deploy/self-managed/install-with-rpm.md | 2 ++ .../start-stop-services/start-stop-elasticsearch.md | 6 +++--- 
.../elasticsearch-reference/configuring-stack-security.md | 4 ++-- .../elasticsearch-reference/es-security-principles.md | 2 +- .../elasticsearch-reference/secure-cluster.md | 4 ++-- .../elasticsearch-reference/security-basic-setup.md | 2 +- .../kibana/kibana/elasticsearch-mutual-tls.md | 4 ++-- .../kibana/kibana/using-kibana-with-security.md | 2 +- 31 files changed, 54 insertions(+), 48 deletions(-) diff --git a/deploy-manage/deploy/self-managed/_snippets/auto-security-config.md b/deploy-manage/deploy/self-managed/_snippets/auto-security-config.md index f44ca254af..6f9c39948d 100644 --- a/deploy-manage/deploy/self-managed/_snippets/auto-security-config.md +++ b/deploy-manage/deploy/self-managed/_snippets/auto-security-config.md @@ -1,6 +1,6 @@ When you start {{es}} for the first time, the following security configuration occurs automatically: -* [Certificates and keys](../../../deploy-manage/security/security-certificates-keys.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. +* [Certificates and keys](/deploy-manage/security/security-certificates-keys.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. * The TLS configuration settings are written to `elasticsearch.yml`. * A password is generated for the `elastic` user. * An enrollment token is generated for {{kib}}, which is valid for 30 minutes. @@ -8,5 +8,5 @@ When you start {{es}} for the first time, the following security configuration o You can then start {{kib}} and enter the enrollment token. This token automatically applies the security settings from your {{es}} cluster, authenticates to {{es}} with the built-in `kibana` service account, and writes the security configuration to `kibana.yml`. 
::::{note} -There are [some cases](../../../deploy-manage/security/security-certificates-keys.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. +There are [some cases](/deploy-manage/security/security-certificates-keys.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. :::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md index 3f45e867fe..c1f7d4767b 100644 --- a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md +++ b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md @@ -1,5 +1,5 @@ You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: -* Learn how to [configure {{es}}](configure-elasticsearch.md). -* Configure [important {{es}} settings](important-settings-configuration.md). -* Configure [important system settings](important-system-configuration.md). \ No newline at end of file +* Learn how to [configure {{es}}](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). +* Configure [important {{es}} settings](/deploy-manage/deploy/self-managed/important-settings-configuration.md). +* Configure [important system settings](/deploy-manage/deploy/self-managed/important-system-configuration.md). 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/java-version.md b/deploy-manage/deploy/self-managed/_snippets/java-version.md index 782fa70c58..ba1e89bf78 100644 --- a/deploy-manage/deploy/self-managed/_snippets/java-version.md +++ b/deploy-manage/deploy/self-managed/_snippets/java-version.md @@ -1,3 +1,3 @@ ::::{note} -{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](installing-elasticsearch.md#jvm-version). +{{es}} includes a bundled version of [OpenJDK](https://openjdk.java.net) from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the [JVM version requirements](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#jvm-version). :::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md b/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md index 269ad49267..8eb332503a 100644 --- a/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md +++ b/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md @@ -6,6 +6,6 @@ We recommend storing the `elastic` password as an environment variable in your s {{export}}ELASTIC_PASSWORD="your_password" ``` -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. +If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](/deploy-manage/security/secure-settings.md) for more details. To learn how to reset this password, refer to [](/deploy-manage/users-roles/cluster-or-deployment-auth/built-in-sm.md). 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/security-files.md b/deploy-manage/deploy/self-managed/_snippets/security-files.md index fda688ea6e..33738d807c 100644 --- a/deploy-manage/deploy/self-managed/_snippets/security-files.md +++ b/deploy-manage/deploy/self-managed/_snippets/security-files.md @@ -9,7 +9,7 @@ When you install {{es}}, the following certificates and keys are generated in th `transport.p12` : Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. +`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](/deploy-manage/security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. Use the following command to retrieve the password for `http.p12`: diff --git a/deploy-manage/deploy/self-managed/_snippets/start-local.md b/deploy-manage/deploy/self-managed/_snippets/start-local.md index f980c71db6..f55ea02d5e 100644 --- a/deploy-manage/deploy/self-managed/_snippets/start-local.md +++ b/deploy-manage/deploy/self-managed/_snippets/start-local.md @@ -1,7 +1,7 @@ ::::{warning} **DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS** -The instructions on this page are for **local development only**. 
Do not use this configuration for production deployments, because it is not secure. Refer to [deployment options](../../get-started/deployment-options.md) for a list of production deployment options. +The instructions on this page are for **local development only**. Do not use this configuration for production deployments, because it is not secure. Refer to [](/get-started/deployment-options.md) for a list of production deployment options. :::: diff --git a/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md b/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md index 3ff887328d..83e68d4144 100644 --- a/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md +++ b/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md @@ -4,7 +4,7 @@ To run {{es}} as a daemon, specify `-d` on the command line, and record the proc ./bin/elasticsearch -d -p pid ``` -If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](../../security/secure-settings.md) for more details. +If you have password-protected the {{es}} keystore, you will be prompted to enter the keystore’s password. See [Secure settings](/deploy-manage/security/secure-settings.md) for more details. Log messages can be found in the `$ES_HOME/logs/` directory. @@ -15,5 +15,5 @@ pkill -F pid ``` ::::{note} -The {{es}} `.tar.gz` package does not include the `systemd` module. To manage {{es}} as a service, use the [Debian](../../maintenance/start-stop-services/start-stop-elasticsearch.md#start-deb) or [RPM](../../maintenance/start-stop-services/start-stop-elasticsearch.md#start-rpm) package instead. +The {{es}} `.tar.gz` package does not include the `systemd` module. To manage {{es}} as a service, use the [Debian](/deploy-manage/deploy/self-managed/install-with-debian-package.md) or [RPM](/deploy-manage/deploy/self-managed/install-with-rpm.md) package instead. 
:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/targz-start.md b/deploy-manage/deploy/self-managed/_snippets/targz-start.md index b93f5a177c..0fdbae4f17 100644 --- a/deploy-manage/deploy/self-managed/_snippets/targz-start.md +++ b/deploy-manage/deploy/self-managed/_snippets/targz-start.md @@ -3,7 +3,7 @@ Run the following command to start {{es}} from the command line: ```sh ./bin/elasticsearch ``` -By default, {{es}} prints its logs to the console (`stdout`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. +By default, {{es}} prints its logs to the console (`stdout`) and to the `.log` file within the [logs directory](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. To stop {{es}}, press `Ctrl-C`. 
diff --git a/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md index 1314933873..8e486452ea 100644 --- a/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md +++ b/deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md @@ -4,6 +4,6 @@ Run the following command to start {{es}} from the command line: .\bin\elasticsearch.bat ``` -By default {{es}} prints its logs to the console (`STDOUT`) and to the `.log` file within the [logs directory](important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. +By default {{es}} prints its logs to the console (`STDOUT`) and to the `.log` file within the [logs directory](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings). {{es}} logs some information while it is starting, but after it has finished initializing it will continue to run in the foreground and won’t log anything further until something happens that is worth recording. While {{es}} is running you can interact with it through its HTTP interface which is on port `9200` by default. To stop {{es}}, press `Ctrl-C`. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks.md b/deploy-manage/deploy/self-managed/bootstrap-checks.md index 1b73aed78f..8401249a51 100644 --- a/deploy-manage/deploy/self-managed/bootstrap-checks.md +++ b/deploy-manage/deploy/self-managed/bootstrap-checks.md @@ -103,7 +103,7 @@ To pass the maximum size virtual memory check, you must configure your system to $$$bootstrap-checks-max-map-count$$$ -Continuing from the previous [point](max-size-virtual-memory-check.md), to use `mmap` effectively, {{es}} also requires the ability to create many memory-mapped areas. The maximum map count check checks that the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only. +In addition to [unlimited address space](#max-size-virtual-memory-check), to use `mmap` effectively, {{es}} also requires the ability to create many memory-mapped areas. The maximum map count check checks that the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only. To pass the maximum map count check, you must configure `vm.max_map_count` via `sysctl` to be at least `262144`. @@ -174,7 +174,7 @@ This bootstrap check ensures that discovery is not running with the default conf * `discovery.seed_providers` * `cluster.initial_master_nodes` -Note that you must [remove `cluster.initial_master_nodes` from the configuration of every node](important-settings-configuration.md#initial_master_nodes) after the cluster has started for the first time. Instead, configure `discovery.seed_hosts` or `discovery.seed_providers`. If you do not need any discovery configuration, for instance if running a single-node cluster, set `discovery.seed_hosts: []` to disable discovery and satisfy this bootstrap check. 
+Note that you must [remove `cluster.initial_master_nodes` from the configuration of every node](/deploy-manage/deploy/self-managed/important-settings-configuration.md#initial_master_nodes) after the cluster has started for the first time. Instead, configure `discovery.seed_hosts` or `discovery.seed_providers`. If you do not need any discovery configuration, for instance if running a single-node cluster, set `discovery.seed_hosts: []` to disable discovery and satisfy this bootstrap check. ::: :::{dropdown} Encrypt sensitive data check diff --git a/deploy-manage/deploy/self-managed/deploy-cluster.md b/deploy-manage/deploy/self-managed/deploy-cluster.md index 8c9e3533cb..1e1a2bb1b2 100644 --- a/deploy-manage/deploy/self-managed/deploy-cluster.md +++ b/deploy-manage/deploy/self-managed/deploy-cluster.md @@ -24,7 +24,7 @@ $$$dedicated-host$$$ **This page is a work in progress.** The documentation team is working to combine content pulled from the following pages: -* [/raw-migrated-files/elasticsearch/elasticsearch-reference/elasticsearch-intro-deploy.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/elasticsearch-intro-deploy.md) +* [/raw-migrated-files/elasticsearch/elasticsearch-reference/elasticsearch-intro-deploy.md] * [/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md) % Doesn't exist diff --git a/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md b/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md index a9a4e4325f..17c799c748 100644 --- a/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md +++ b/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md @@ -17,7 +17,7 @@ This is only relevant for Linux. By default, {{es}} will create its temporary directory within `/tmp`. However, some hardened Linux installations mount `/tmp` with the `noexec` option by default. This prevents JNA and `libffi` from working correctly. 
For instance, at startup JNA may fail to load with an `java.lang.UnsatisfiedLinkerError` exception or with a message that says something similar to `failed to map segment from shared object`, or `libffi` may report a message such as `failed to allocate closure`. Note that the exception messages can differ between JVM versions. Additionally, the components of {{es}} that rely on execution of native code via JNA may fail with messages indicating that it is `because JNA is not available`. -To resolve these problems, either remove the `noexec` option from your `/tmp` filesystem, or configure {{es}} to use a different location for its temporary directory by setting the [`$ES_TMPDIR`](important-settings-configuration.md#es-tmpdir) environment variable. For instance: +To resolve these problems, either remove the `noexec` option from your `/tmp` filesystem, or configure {{es}} to use a different location for its temporary directory by setting the [`$ES_TMPDIR`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#es-tmpdir) environment variable. For instance: * If you are running {{es}} directly from a shell, set `$ES_TMPDIR` as follows: diff --git a/deploy-manage/deploy/self-managed/important-settings-configuration.md b/deploy-manage/deploy/self-managed/important-settings-configuration.md index 2eee82acca..2ec703882a 100644 --- a/deploy-manage/deploy/self-managed/important-settings-configuration.md +++ b/deploy-manage/deploy/self-managed/important-settings-configuration.md @@ -114,7 +114,7 @@ Configure two important discovery and cluster formation settings before going to Out of the box, without any network configuration, {{es}} will bind to the available loopback addresses and scan local ports `9300` to `9305` to connect with other nodes running on the same server. This behavior provides an auto-clustering experience without having to do any configuration. 
-When you want to form a cluster with nodes on other hosts, use the [static](configure-elasticsearch.md#static-cluster-setting) `discovery.seed_hosts` setting. This setting provides a list of other nodes in the cluster that are master-eligible and likely to be live and contactable to seed the [discovery process](../../distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md). This setting accepts a YAML sequence or array of the addresses of all the master-eligible nodes in the cluster. Each address can be either an IP address or a hostname that resolves to one or more IP addresses via DNS. +When you want to form a cluster with nodes on other hosts, use the [static](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#static-cluster-setting) `discovery.seed_hosts` setting. This setting provides a list of other nodes in the cluster that are master-eligible and likely to be live and contactable to seed the [discovery process](../../distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md). This setting accepts a YAML sequence or array of the addresses of all the master-eligible nodes in the cluster. Each address can be either an IP address or a hostname that resolves to one or more IP addresses via DNS. ```yaml discovery.seed_hosts: @@ -134,7 +134,7 @@ If your master-eligible nodes do not have fixed names or addresses, use an [alte ### `cluster.initial_master_nodes` [initial_master_nodes] -When you start an {{es}} cluster for the first time, a [cluster bootstrapping](../../distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md) step determines the set of master-eligible nodes whose votes are counted in the first election. In [development mode](bootstrap-checks.md#dev-vs-prod-mode), with no discovery settings configured, this step is performed automatically by the nodes themselves. 
+When you start an {{es}} cluster for the first time, a [cluster bootstrapping](../../distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md) step determines the set of master-eligible nodes whose votes are counted in the first election. In [development mode](/deploy-manage/deploy/self-managed/bootstrap-checks.md#dev-vs-prod-mode), with no discovery settings configured, this step is performed automatically by the nodes themselves. Because auto-bootstrapping is [inherently unsafe](../../distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md), when starting a new cluster in production mode, you must explicitly list the master-eligible nodes whose votes should be counted in the very first election. You set this list using the `cluster.initial_master_nodes` setting on every master-eligible node. Do not configure this setting on master-ineligible nodes. diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md index a78bc2dfee..b819458791 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md @@ -89,4 +89,4 @@ docker-compose down -v ## Next steps [_next_steps_6] -You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, review the [requirements and recommendations](#docker-prod-prerequisites) to apply when running {{es}} in Docker in production. +You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, review the [requirements and recommendations](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md) to apply when running {{es}} in Docker in production. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md index 6d715c5e8f..69bd81ee21 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md @@ -9,7 +9,7 @@ navigation_title: Configure # Configure {{es}} with Docker [docker-configuration-methods] -When you run in Docker, the [{{es}} configuration files](configure-elasticsearch.md#config-files-location) are loaded from `/usr/share/elasticsearch/config/`. +When you run in Docker, the [{{es}} configuration files](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location) are loaded from `/usr/share/elasticsearch/config/`. To use custom configuration files, you [bind-mount the files](#docker-config-bind-mount) over the configuration files in the image. @@ -36,7 +36,7 @@ You can override the default command for the image to pass {{es}} configuration docker run bin/elasticsearch -Ecluster.name=mynewclustername ``` -While bind-mounting your configuration files is usually the preferred method in production, you can also [create a custom Docker image](#_c_customized_image) that contains your configuration. +While bind-mounting your configuration files is usually the preferred method in production, you can also [create a custom Docker image](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md#_c_customized_image) that contains your configuration. ## Mounting {{es}} configuration files [docker-config-bind-mount] @@ -56,7 +56,7 @@ The container **runs {{es}} as user `elasticsearch` using uid:gid `1000:0`**. Bi ## Create an encrypted {{es}} keystore [docker-keystore-bind-mount] -By default, {{es}} will auto-generate a keystore file for [secure settings](../../security/secure-settings.md). 
This file is obfuscated but not encrypted. +By default, {{es}} will auto-generate a keystore file for [secure settings](/deploy-manage/security/secure-settings.md). This file is obfuscated but not encrypted. To encrypt your secure settings with a password and have them persist outside the container, use a `docker run` command to manually create the keystore instead. The command must: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md index dc52aa5023..2d7a29c7ad 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md @@ -107,7 +107,7 @@ vm.max_map_count = 262144 By default, {{es}} runs inside the container as user `elasticsearch` using uid:gid `1000:0`. -If you are bind-mounting a local directory or file, it must be readable by the `elasticsearch` user. In addition, this user must have write access to the [config, data and log dirs](important-settings-configuration.md#path-settings) ({{es}} needs write access to the `config` directory so that it can generate a keystore). A good strategy is to grant group access to gid `0` for the local directory. +If you are bind-mounting a local directory or file, it must be readable by the `elasticsearch` user. In addition, this user must have write access to the [config, data and log dirs](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) ({{es}} needs write access to the `config` directory so that it can generate a keystore). A good strategy is to grant group access to gid `0` for the local directory. ::::{important} One exception is [Openshift](https://docs.openshift.com/container-platform/3.6/creating_images/guidelines.md#openshift-specific-guidelines), which runs containers using an arbitrarily assigned user ID. 
Openshift presents persistent volumes with the gid set to `0`, which works without any adjustments. @@ -123,13 +123,13 @@ chgrp 0 esdatadir You can also run an {{es}} container using both a custom UID and GID. You must ensure that file permissions will not prevent {{es}} from executing. You can use one of two options: -* Bind-mount the `config`, `data` and `logs` directories. If you intend to install plugins and prefer not to [create a custom Docker image](#_c_customized_image), you must also bind-mount the `plugins` directory. +* Bind-mount the `config`, `data` and `logs` directories. If you intend to install plugins and prefer not to [create a custom Docker image](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md#_c_customized_image), you must also bind-mount the `plugins` directory. * Pass the `--group-add 0` command line option to `docker run`. This ensures that the user under which {{es}} is running is also a member of the `root` (GID 0) group inside the container. ## Increase ulimits for nofile and nproc [_increase_ulimits_for_nofile_and_nproc] -Increased ulimits for [nofile](setting-system-settings.md) and [nproc](max-number-threads-check.md) must be available for the {{es}} containers. Verify the [init system](https://github.com/moby/moby/tree/ea4d1243953e6b652082305a9c3cda8656edab26/contrib/init) for the Docker daemon sets them to acceptable values. +Increased ulimits for [nofile](setting-system-settings.md) and [nproc](/deploy-manage/deploy/self-managed/bootstrap-checks.md#max-number-threads-check) must be available for the {{es}} containers. Verify the [init system](https://github.com/moby/moby/tree/ea4d1243953e6b652082305a9c3cda8656edab26/contrib/init) for the Docker daemon sets them to acceptable values. To check the Docker daemon defaults for ulimits, run: @@ -148,7 +148,7 @@ If needed, adjust them in the Daemon or override them per container. For example Swapping needs to be disabled for performance and node stability. 
For information about ways to do this, see [Disable swapping](setup-configuration-memory.md). -If you opt for the `bootstrap.memory_lock: true` approach, you also need to define the `memlock: true` ulimit in the [Docker Daemon](https://docs.docker.com/engine/reference/commandline/dockerd/#default-ulimits), or explicitly set for the container as shown in the [sample compose file](#docker-compose-file). When using `docker run`, you can specify: +If you opt for the `bootstrap.memory_lock: true` approach, you also need to define the `memlock: true` ulimit in the [Docker Daemon](https://docs.docker.com/engine/reference/commandline/dockerd/#default-ulimits), or explicitly set for the container as shown in the [sample compose file](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md). When using `docker run`, you can specify: ```sh -e "bootstrap.memory_lock=true" --ulimit memlock=-1:-1 diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index d0d1fdf2e6..19b981bb59 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -151,7 +151,7 @@ This is convenient because you don’t have to create any directories to start u | --- | --- | --- | --- | | home | {{es}} home directory or `$ES_HOME` | Directory created by unpacking the archive | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `$ES_HOME/bin` | | -| conf | Configuration files including `elasticsearch.yml` | `$ES_HOME/config` | `[ES_PATH_CONF](configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files including `elasticsearch.yml` | `$ES_HOME/config` | 
`[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | | conf | Generated TLS keys and certificates for the transport and HTTP layer. | `$ES_HOME/config/certs` | | | data | The location of the data files of each index / shard allocated on the node. | `$ES_HOME/data` | `path.data` | | logs | Log files location. | `$ES_HOME/logs` | `path.logs` | diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index 8628bb7fca..602ff526ae 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -177,7 +177,7 @@ The Debian package places config files, logs, and the data directory in the appr | --- | --- | --- | --- | | home | {{es}} home directory or `$ES_HOME` | `/usr/share/elasticsearch` | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `/usr/share/elasticsearch/bin` | | -| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | `[ES_PATH_CONF](configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | `[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | | conf | Environment variables including heap size, file descriptors. | `/etc/default/elasticsearch` | | | conf | Generated TLS keys and certificates for the transport and http layer. | `/etc/elasticsearch/certs` | | | data | The location of the data files of each index / shard allocated on the node. 
| `/var/lib/elasticsearch` | `path.data` | diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 41b851c314..27d5d6af27 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -50,7 +50,7 @@ rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch You have several options for installing the {{es}} RPM package: * [From the RPM repository](#rpm-repo) -* [Manually](#install-deb) +* [Manually](#install-rpm) ### Install from the RPM repository [rpm-repo] @@ -166,7 +166,7 @@ The RPM places config files, logs, and the data directory in the appropriate loc | --- | --- | --- | --- | | home |{{es}} home directory or `$ES_HOME` | `/usr/share/elasticsearch` | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `/usr/share/elasticsearch/bin` | | -| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | `[ES_PATH_CONF](configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | `[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | | conf | Environment variables including heap size, file descriptors. | `/etc/sysconfig/elasticsearch` | | | conf | Generated TLS keys and certificates for the transport and http layer. | `/etc/elasticsearch/certs` | | | data | The location of the data files of each index / shard allocated on the node. 
| `/var/lib/elasticsearch` | `path.data` | diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 2da5fe1b04..3ccee0df9c 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -79,7 +79,7 @@ You can run {{es}} [from the command line](#command-line), or install and run {{ :::{include} _snippets/cmd-line-config.md ::: -### Enroll the node in an existing cluster [_enroll_nodes_in_an_existing_cluster_2] +### Enroll the node in an existing cluster [existing-cluster] :::{include} _snippets/enroll-nodes.md ::: @@ -224,7 +224,7 @@ This is very convenient because you don’t have to create any directories to st | --- | --- | --- | --- | | home | {{es}} home directory or `%ES_HOME%` | Directory created by unpacking the archive | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `%ES_HOME%\bin` | | -| conf | Configuration files including `elasticsearch.yml` | `%ES_HOME%\config` | `[ES_PATH_CONF](configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files including `elasticsearch.yml` | `%ES_HOME%\config` | `[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | | conf | Generated TLS keys and certificates for the transport and HTTP layer. | `%ES_HOME%\config\certs` | | | data | The location of the data files of each index / shard allocated on the node. | `%ES_HOME%\data` | `path.data` | | logs | Log files location. 
| `%ES_HOME%\logs` | `path.logs` | diff --git a/deploy-manage/deploy/self-managed/install-on-windows.md b/deploy-manage/deploy/self-managed/install-on-windows.md index 458f64256a..69ecf244a0 100644 --- a/deploy-manage/deploy/self-managed/install-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-on-windows.md @@ -2,6 +2,8 @@ navigation_title: "Install on Windows" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/windows.html +sub: + stack-version: "9.0.0" --- diff --git a/deploy-manage/deploy/self-managed/install-with-debian-package.md b/deploy-manage/deploy/self-managed/install-with-debian-package.md index 24bc50babf..43e1961e0f 100644 --- a/deploy-manage/deploy/self-managed/install-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-with-debian-package.md @@ -2,6 +2,8 @@ navigation_title: "Install with Debian package" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/deb.html +sub: + stack-version: "9.0.0" --- diff --git a/deploy-manage/deploy/self-managed/install-with-docker.md b/deploy-manage/deploy/self-managed/install-with-docker.md index ca2b81dc09..b7c755a963 100644 --- a/deploy-manage/deploy/self-managed/install-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-with-docker.md @@ -21,7 +21,7 @@ These images contain both free and subscription features. [Start a 30-day trial] Use Docker commands to run {{kib}} on a single-node {{es}} cluster for development or testing. ::::{tip} -This setup doesn’t run multiple {{es}} nodes by default. To create a multi-node cluster with {{kib}}, use Docker Compose instead. Refer to [Start a multi-node cluster with Docker Compose](install-elasticsearch-with-docker.md#docker-compose-file) in the {{es}} documentation. +This setup doesn’t run multiple {{es}} nodes by default. To create a multi-node cluster with {{kib}}, use Docker Compose instead. 
Refer to [Start a multi-node cluster with Docker Compose](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md) in the {{es}} documentation. :::: @@ -82,7 +82,7 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version} ``` ::::{tip} - Use the `-m` flag to set a memory limit for the container. This removes the need to [manually set the JVM size](install-elasticsearch-with-docker.md#docker-set-heap-size). + Use the `-m` flag to set a memory limit for the container. This removes the need to [manually set the JVM size](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md#docker-set-heap-size). :::: @@ -170,7 +170,7 @@ services: ## Persist the {{kib}} keystore [_persist_the_kib_keystore] -By default, {{kib}} auto-generates a keystore file for secure settings at startup. To persist your [secure settings](../../security/secure-settings.md), use the `kibana-keystore` utility to bind-mount the parent directory of the keystore to the container. For example: +By default, {{kib}} auto-generates a keystore file for secure settings at startup. To persist your [secure settings](/deploy-manage/security/secure-settings.md), use the `kibana-keystore` utility to bind-mount the parent directory of the keystore to the container. 
For example: ```sh docker run -it --rm -v full_path_to/config:/usr/share/kibana/config -v full_path_to/data:/usr/share/kibana/data docker.elastic.co/kibana/kibana:{{stack-version}} bin/kibana-keystore create diff --git a/deploy-manage/deploy/self-managed/install-with-rpm.md b/deploy-manage/deploy/self-managed/install-with-rpm.md index c05e6133f7..5d6b06e622 100644 --- a/deploy-manage/deploy/self-managed/install-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-with-rpm.md @@ -2,6 +2,8 @@ navigation_title: "Install with RPM" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/rpm.html +sub: + stack-version: "9.0.0" --- diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md index 600e5fa572..619f1989df 100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md @@ -24,7 +24,7 @@ If you installed {{es}} on Linux or MacOS with a `.tar.gz` package, you can star :::{include} /deploy-manage/deploy/self-managed/_snippets/targz-start.md ::: -If you're starting {{es}} for the first time, then {{es}} also enables and configures security. [Learn more](/deploy-manage/deploy/self-managed/install-{{es}}-from-archive-on-linux-macos.md#security-at-startup). +If you're starting {{es}} for the first time, then {{es}} also enables and configures security. [Learn more](/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md#security-at-startup). #### Run as a daemon [_run_as_a_daemon] @@ -33,7 +33,7 @@ If you're starting {{es}} for the first time, then {{es}} also enables and confi ### Archive packages (`.zip`) [start-zip] -If you installed {{es}} on Windows with a `.zip` package, you can start {{es}} from the command line. 
If you want {{es}} to start automatically at boot time without any user interaction, [install {{es}} as a service](../../../deploy-manage/deploy/self-managed/install-{{es}}-with-zip-on-windows.md#windows-service). +If you installed {{es}} on Windows with a `.zip` package, you can start {{es}} from the command line. If you want {{es}} to start automatically at boot time without any user interaction, [install {{es}} as a service](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md#windows-service). :::{include} /deploy-manage/deploy/self-managed/_snippets/zip-windows-start.md ::: @@ -122,7 +122,7 @@ To avoid this, upgrade your `systemd` to at least version 238. You can also temp ### Docker images [start-docker] -If you installed a Docker image, you can start {{es}} from the command line. There are different methods depending on whether you’re using development mode or production mode. See [Run {{es}} in Docker](../../../deploy-manage/deploy/self-managed/install-{{es}}-with-docker.md#docker-cli-run-dev-mode). +If you installed a Docker image, you can start {{es}} from the command line. There are different methods depending on whether you’re using development mode or production mode. See [Run {{es}} in Docker](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md#docker-cli-run-dev-mode). 
### RPM packages [start-rpm] diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md index f68ea7803a..37e372178b 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md @@ -2,7 +2,7 @@ When you start {{es}} for the first time, the following security configuration occurs automatically: -* [Certificates and keys](../../../deploy-manage/security/security-certificates-keys.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. +* [Certificates and keys](/deploy-manage/security/security-certificates-keys.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. * The TLS configuration settings are written to `elasticsearch.yml`. * A password is generated for the `elastic` user. * An enrollment token is generated for {{kib}}. @@ -10,7 +10,7 @@ When you start {{es}} for the first time, the following security configuration o You can then start {{kib}} and enter the enrollment token, which is valid for 30 minutes. This token automatically applies the security settings from your {{es}} cluster, authenticates to {{es}} with the built-in `kibana` service account, and writes the security configuration to `kibana.yml`. ::::{note} -There are [some cases](../../../deploy-manage/security/security-certificates-keys.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. 
+There are [some cases](/deploy-manage/security/security-certificates-keys.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. :::: diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/es-security-principles.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/es-security-principles.md index 6537b21eae..7fcf8adce8 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/es-security-principles.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/es-security-principles.md @@ -5,7 +5,7 @@ Protecting your {{es}} cluster and the data it contains is of utmost importance. ## Run {{es}} with security enabled [security-run-with-security] -Never run an {{es}} cluster without security enabled. This principle cannot be overstated. Running {{es}} without security leaves your cluster exposed to anyone who can send network traffic to {{es}}, permitting these individuals to download, modify, or delete any data in your cluster. [Start the {{stack}} with security enabled](../../../deploy-manage/security/security-certificates-keys.md) or [manually configure security](../../../deploy-manage/security/manually-configure-security-in-self-managed-cluster.md) to prevent unauthorized access to your clusters and ensure that internode communication is secure. +Never run an {{es}} cluster without security enabled. This principle cannot be overstated. Running {{es}} without security leaves your cluster exposed to anyone who can send network traffic to {{es}}, permitting these individuals to download, modify, or delete any data in your cluster. 
[Start the {{stack}} with security enabled](/deploy-manage/security/security-certificates-keys.md) or [manually configure security](../../../deploy-manage/security/manually-configure-security-in-self-managed-cluster.md) to prevent unauthorized access to your clusters and ensure that internode communication is secure. ## Run {{es}} with a dedicated non-root user [security-not-root-user] diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-cluster.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-cluster.md index fd450af95e..da4f373cbb 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-cluster.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-cluster.md @@ -2,7 +2,7 @@ The {{stack}} is comprised of many moving parts. There are the {{es}} nodes that form the cluster, plus {{ls}} instances, {{kib}} instances, {{beats}} agents, and clients all communicating with the cluster. To keep your cluster safe, adhere to the [{{es}} security principles](../../../deploy-manage/security.md). -The first principle is to run {{es}} with security enabled. Configuring security can be complicated, so we made it easy to [start the {{stack}} with security enabled and configured](../../../deploy-manage/security/security-certificates-keys.md). For any new clusters, just start {{es}} to automatically enable password protection, secure internode communication with Transport Layer Security (TLS), and encrypt connections between {{es}} and {{kib}}. +The first principle is to run {{es}} with security enabled. Configuring security can be complicated, so we made it easy to [start the {{stack}} with security enabled and configured](/deploy-manage/security/security-certificates-keys.md). For any new clusters, just start {{es}} to automatically enable password protection, secure internode communication with Transport Layer Security (TLS), and encrypt connections between {{es}} and {{kib}}. 
If you have an existing, unsecured cluster (or prefer to manage security on your own), you can [manually enable and configure security](../../../deploy-manage/security/manually-configure-security-in-self-managed-cluster.md) to secure {{es}} clusters and any clients that communicate with your clusters. You can also implement additional security measures, such as role-based access control, IP filtering, and auditing. @@ -35,7 +35,7 @@ See [User authentication](../../../deploy-manage/users-roles/cluster-or-deployme A critical part of security is keeping confidential data secured. {{es}} has built-in protections against accidental data loss and corruption. However, there’s nothing to stop deliberate tampering or data interception. The {{stack-security-features}} use TLS to preserve the *integrity* of your data against tampering, while also providing *confidentiality* by encrypting communications to, from, and within the cluster. For even greater protection, you can increase the [encryption strength](../../../deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md). -See [Configure security for the {{stack}}](../../../deploy-manage/security/security-certificates-keys.md). +See [Configure security for the {{stack}}](/deploy-manage/security/security-certificates-keys.md). ## Maintaining an audit trail [maintaining-audit-trail] diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md index c59eae3dd5..470a34dc4f 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md @@ -126,7 +126,7 @@ Complete the following steps **for each node in your cluster**. To join the same ## What’s next? [encrypting-internode-whatsnext] -Congratulations! 
You’ve encrypted communications between the nodes in your cluster and can pass the [TLS bootstrap check](bootstrap-checks-xpack.md#bootstrap-checks-tls). +Congratulations! You’ve encrypted communications between the nodes in your cluster and can pass the [TLS bootstrap check](/deploy-manage/deploy/self-managed/bootstrap-checks.md#bootstrap-checks-tls). To add another layer of security, [Set up basic security for the Elastic Stack plus secured HTTPS traffic](security-basic-setup-https.md). In addition to configuring TLS on the transport interface of your {{es}} cluster, you configure TLS on the HTTP interface for both {{es}} and {{kib}}. diff --git a/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md b/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md index 3a476d98b7..3e5a89b113 100644 --- a/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md +++ b/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md @@ -13,7 +13,7 @@ In a standard TLS configuration, the server presents a signed certificate to aut {{es}} {{security-features}} are enabled on your cluster by default, so each request that {{kib}} (the client) makes to {{es}} (the server) is authenticated. Most requests made by end users through {{kib}} to {{es}} are authenticated by using the credentials of the logged-in user. -To [enroll {{kib}} with an {{es}} cluster](../../../deploy-manage/security/security-certificates-keys.md#stack-start-with-security), you pass a generated enrollment token. This token configures {{kib}} to authenticate with {{es}} using a [service account token](../../../deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts.md#service-accounts-tokens). {{kib}} also supports mutual TLS authentication with {{es}} via a [Public Key Infrastructure (PKI) realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md). 
With this setup, {{es}} needs to verify the signature on the {{kib}} client certificate, and it also needs to map the client certificate’s distinguished name (DN) to the appropriate `kibana_system` role. +To [enroll {{kib}} with an {{es}} cluster](/deploy-manage/security/security-certificates-keys.md#stack-start-with-security), you pass a generated enrollment token. This token configures {{kib}} to authenticate with {{es}} using a [service account token](../../../deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts.md#service-accounts-tokens). {{kib}} also supports mutual TLS authentication with {{es}} via a [Public Key Infrastructure (PKI) realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md). With this setup, {{es}} needs to verify the signature on the {{kib}} client certificate, and it also needs to map the client certificate’s distinguished name (DN) to the appropriate `kibana_system` role. ::::{note} Using a PKI realm is a [subscription feature](https://www.elastic.co/subscriptions). @@ -23,7 +23,7 @@ Using a PKI realm is a [subscription feature](https://www.elastic.co/subscriptio ## Configure {{kib}} and {{es}} to use mutual TLS authentication [_configure_kib_and_es_to_use_mutual_tls_authentication] -If you haven’t already, start {{kib}} and connect it to {{es}} using the [enrollment token](../../../deploy-manage/security/security-certificates-keys.md#stack-start-with-security). +If you haven’t already, start {{kib}} and connect it to {{es}} using the [enrollment token](/deploy-manage/security/security-certificates-keys.md#stack-start-with-security). 1. Obtain a client certificate and private key for {{kib}}. 
diff --git a/raw-migrated-files/kibana/kibana/using-kibana-with-security.md b/raw-migrated-files/kibana/kibana/using-kibana-with-security.md index eb5ade4018..b0c9366e52 100644 --- a/raw-migrated-files/kibana/kibana/using-kibana-with-security.md +++ b/raw-migrated-files/kibana/kibana/using-kibana-with-security.md @@ -5,7 +5,7 @@ navigation_title: "Configure security" # Configure security in {{kib}} [using-kibana-with-security] -When you start {{es}} for the first time, {{stack-security-features}} are enabled on your cluster and TLS is configured automatically. The security configuration process generates a password for the `elastic` user and an enrollment token for {{kib}}. [Start the {{stack}} with security enabled](../../../deploy-manage/security/security-certificates-keys.md) and then enroll {{kib}} as part of the configuration process. +When you start {{es}} for the first time, {{stack-security-features}} are enabled on your cluster and TLS is configured automatically. The security configuration process generates a password for the `elastic` user and an enrollment token for {{kib}}. [Start the {{stack}} with security enabled](/deploy-manage/security/security-certificates-keys.md) and then enroll {{kib}} as part of the configuration process. You can then log in to {{kib}} as the `elastic` user to create additional roles and users. 
From b8653f956f1cd62b28c1d823b845f4ad3af90909 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Wed, 5 Mar 2025 21:27:03 -0500 Subject: [PATCH 09/43] more errors --- deploy-manage/deploy/cloud-on-k8s/create-custom-images.md | 2 +- .../deploy/self-managed/important-settings-configuration.md | 2 +- .../deploy/self-managed/install-elasticsearch-docker-basic.md | 2 +- .../maintenance/start-stop-services/start-stop-elasticsearch.md | 2 +- .../elasticsearch-reference/security-basic-setup.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy-manage/deploy/cloud-on-k8s/create-custom-images.md b/deploy-manage/deploy/cloud-on-k8s/create-custom-images.md index 8067831804..042b5be9a2 100644 --- a/deploy-manage/deploy/cloud-on-k8s/create-custom-images.md +++ b/deploy-manage/deploy/cloud-on-k8s/create-custom-images.md @@ -50,7 +50,7 @@ If your custom images follow the naming convention adopted by the official image For more information, check the following references: -* [Elasticsearch documentation on Using custom Docker images](/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md#_c_customized_image) +* [Elasticsearch documentation on Using custom Docker images](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md#_c_customized_image) * [Google Container Registry](https://cloud.google.com/container-registry/docs/how-to) * [Azure Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/) * [Amazon Elastic Container Registry](https://docs.aws.amazon.com/AmazonECR/latest/userguide/what-is-ecr.md) diff --git a/deploy-manage/deploy/self-managed/important-settings-configuration.md b/deploy-manage/deploy/self-managed/important-settings-configuration.md index 2ec703882a..eaf563dc56 100644 --- a/deploy-manage/deploy/self-managed/important-settings-configuration.md +++ b/deploy-manage/deploy/self-managed/important-settings-configuration.md @@ -134,7 +134,7 @@ If your master-eligible nodes do not 
have fixed names or addresses, use an [alte ### `cluster.initial_master_nodes` [initial_master_nodes] -When you start an {{es}} cluster for the first time, a [cluster bootstrapping](../../distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md) step determines the set of master-eligible nodes whose votes are counted in the first election. In [development mode](/deploy-manage/deploy/self-managed/bootstrap-checks.md#bootstrap-checks.md#dev-vs-prod-mode), with no discovery settings configured, this step is performed automatically by the nodes themselves. +When you start an {{es}} cluster for the first time, a [cluster bootstrapping](../../distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md) step determines the set of master-eligible nodes whose votes are counted in the first election. In [development mode](/deploy-manage/deploy/self-managed/bootstrap-checks.md#dev-vs-prod-mode), with no discovery settings configured, this step is performed automatically by the nodes themselves. Because auto-bootstrapping is [inherently unsafe](../../distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md), when starting a new cluster in production mode, you must explicitly list the master-eligible nodes whose votes should be counted in the very first election. You set this list using the `cluster.initial_master_nodes` setting on every master-eligible node. Do not configure this setting on master-ineligible nodes. 
diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md index 0778202785..cf45a330f1 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md @@ -66,7 +66,7 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes ``` ::::{tip} - Use the `-m` flag to set a memory limit for the container. This removes the need to [manually set the JVM size](#docker-set-heap-size). + Use the `-m` flag to set a memory limit for the container. This removes the need to [manually set the JVM size](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md#docker-set-heap-size). :::: diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md index 619f1989df..86c9a20e64 100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md @@ -122,7 +122,7 @@ To avoid this, upgrade your `systemd` to at least version 238. You can also temp ### Docker images [start-docker] -If you installed a Docker image, you can start {{es}} from the command line. There are different methods depending on whether you’re using development mode or production mode. See [Run {{es}} in Docker](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md#docker-cli-run-dev-mode). +If you installed a Docker image, you can start {{es}} from the command line. There are different methods depending on whether you’re using development mode or production mode. See [](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md). 
### RPM packages [start-rpm] diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md index 470a34dc4f..ba35464be4 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup.md @@ -8,7 +8,7 @@ navigation_title: "Set up basic security" When you start {{es}} for the first time, passwords are generated for the `elastic` user and TLS is automatically configured for you. If you configure security manually *before* starting your {{es}} nodes, the auto-configuration process will respect your security configuration. You can adjust your TLS configuration at any time, such as [updating node certificates](../../../deploy-manage/security/updating-certificates.md). ::::{important} -If your cluster has multiple nodes, then you must configure TLS between nodes. [Production mode](bootstrap-checks.md#dev-vs-prod-mode) clusters will not start if you do not enable TLS. +If your cluster has multiple nodes, then you must configure TLS between nodes. [Production mode](/deploy-manage/deploy/self-managed/bootstrap-checks.md#dev-vs-prod-mode) clusters will not start if you do not enable TLS. 
:::: From a182f40e82675db1d136d84af24642836b29b05f Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 6 Mar 2025 23:38:18 -0500 Subject: [PATCH 10/43] kib --- deploy-manage/deploy/self-managed.md | 12 +- .../self-managed/_snippets/enroll-steps.md | 21 + .../self-managed/_snippets/enroll-systemd.md | 30 + .../_snippets/install-next-steps.md | 3 +- .../self-managed/_snippets/kib-releases.md | 1 + .../_snippets/new-enrollment-token.md | 5 + .../deploy/self-managed/access-kibana.md | 21 + deploy-manage/deploy/self-managed/access.md | 38 -- .../self-managed/configure-elasticsearch.md | 51 +- .../deploy/self-managed/configure-kibana.md | 29 + .../deploy/self-managed/configure.md | 527 ------------------ .../important-settings-configuration.md | 28 +- ...stall-elasticsearch-with-debian-package.md | 6 +- ...all-kibana-from-archive-on-linux-macos.md} | 78 ++- .../self-managed/install-kibana-on-windows.md | 72 +++ ... => install-kibana-with-debian-package.md} | 89 +-- ...ocker.md => install-kibana-with-docker.md} | 27 +- ...with-rpm.md => install-kibana-with-rpm.md} | 70 +-- .../deploy/self-managed/install-kibana.md | 68 ++- .../deploy/self-managed/install-on-windows.md | 82 --- .../self-managed/installing-elasticsearch.md | 56 -- .../other-configuration-settings.md | 9 - deploy-manage/deploy/self-managed/plugins.md | 10 +- ...figure-security-in-self-managed-cluster.md | 44 ++ deploy-manage/toc.yml | 15 +- .../configuring-stack-security.md | 237 -------- raw-migrated-files/kibana/kibana/install.md | 58 -- raw-migrated-files/kibana/kibana/setup.md | 28 - 28 files changed, 474 insertions(+), 1241 deletions(-) create mode 100644 deploy-manage/deploy/self-managed/_snippets/enroll-steps.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/enroll-systemd.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/kib-releases.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md create mode 100644 
deploy-manage/deploy/self-managed/access-kibana.md delete mode 100644 deploy-manage/deploy/self-managed/access.md create mode 100644 deploy-manage/deploy/self-managed/configure-kibana.md delete mode 100644 deploy-manage/deploy/self-managed/configure.md rename deploy-manage/deploy/self-managed/{install-from-archive-on-linux-macos.md => install-kibana-from-archive-on-linux-macos.md} (61%) create mode 100644 deploy-manage/deploy/self-managed/install-kibana-on-windows.md rename deploy-manage/deploy/self-managed/{install-with-debian-package.md => install-kibana-with-debian-package.md} (59%) rename deploy-manage/deploy/self-managed/{install-with-docker.md => install-kibana-with-docker.md} (93%) rename deploy-manage/deploy/self-managed/{install-with-rpm.md => install-kibana-with-rpm.md} (63%) delete mode 100644 deploy-manage/deploy/self-managed/install-on-windows.md delete mode 100644 deploy-manage/deploy/self-managed/other-configuration-settings.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md delete mode 100644 raw-migrated-files/kibana/kibana/install.md delete mode 100644 raw-migrated-files/kibana/kibana/setup.md diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 74e7b2553f..fae873d0c7 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -7,14 +7,4 @@ sub: # Self-managed cluster [dependencies-versions] -See [Elastic Stack Third-party Dependencices](https://artifacts.elastic.co/reports/dependencies/dependencies-current.md) for the complete list of dependencies for {{es}}. - - -```sh -{{stack-version}} -``` - -{{stack-version}} - -1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `elasticsearch--linux-x86_64.tar.gz: OK`. -2. This directory is known as `$ES_HOME`. 
\ No newline at end of file +See [Elastic Stack third-party dependencies](https://artifacts.elastic.co/reports/dependencies/dependencies-current.md) for the complete list of dependencies for {{es}}. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-steps.md b/deploy-manage/deploy/self-managed/_snippets/enroll-steps.md new file mode 100644 index 0000000000..12eee83897 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-steps.md @@ -0,0 +1,21 @@ +If this is the first time you’re starting {{kib}}, this command generates a unique link in your terminal to enroll your {{kib}} instance with {{es}}. + +1. In your terminal, click the generated link to open {{kib}} in your browser. +2. In your browser, paste the enrollment token that was generated in the terminal when you started {{es}}, and then click the button to connect your {{kib}} instance with {{es}}. +3. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. + +::::{note} +If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. +:::: + +:::{tip} +{{kib}} won’t enter interactive mode if it detects existing credentials for {{es}} (`elasticsearch.username` and `elasticsearch.password`) or an existing URL for `elasticsearch.hosts`. + +In this case, you can enroll {{kib}} in detached mode: + +Run the `kibana-setup` tool and pass the generated enrollment token with the `--enrollment-token` parameter. 
+ +```sh +bin/kibana-setup --enrollment-token +``` +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-systemd.md b/deploy-manage/deploy/self-managed/_snippets/enroll-systemd.md new file mode 100644 index 0000000000..45ee07d243 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-systemd.md @@ -0,0 +1,30 @@ +1. Run the `status` command to get details about the {{{kib}}} service. + + ```sh + sudo systemctl status kibana + ``` + + + In the `status` command output, a URL is shown with: + + * A host address to access {{kib}} + * A six digit verification code + + For example: + + ```sh + Kibana has not been configured. + Go to http://:5601/?code= to get started. + ``` + + Make a note of the verification code. + +2. Go to the host address. + + It can take a minute or two for {{kib}} to start up, so refresh the page if you don’t see a prompt right away. + +3. When {{kib}} starts, you’re prompted to provide an enrollment token. Paste in the {{kib}} enrollment token that you generated earlier. +4. Click **Configure Elastic**. +5. If you’re prompted to provide a verification code, copy and paste in the six digit code that was returned by the `status` command. Then, wait for the setup to complete. +6. When you see the **Welcome to Elastic** page, provide the `elastic` as the username and provide the password that you copied from the install command output when you set up your first {{es}} node. +7. Click **Log in**. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md index c1f7d4767b..af594640da 100644 --- a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md +++ b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md @@ -1,5 +1,4 @@ You now have a test {{es}} environment set up. 
Before you start serious development or go into production with {{es}}, you must do some additional setup: * Learn how to [configure {{es}}](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). -* Configure [important {{es}} settings](/deploy-manage/deploy/self-managed/important-settings-configuration.md). -* Configure [important system settings](/deploy-manage/deploy/self-managed/important-system-configuration.md). \ No newline at end of file +* Configure [important {{es}} settings](/deploy-manage/deploy/self-managed/important-settings-configuration.md). \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/kib-releases.md b/deploy-manage/deploy/self-managed/_snippets/kib-releases.md new file mode 100644 index 0000000000..4e265d1632 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/kib-releases.md @@ -0,0 +1 @@ +The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md b/deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md new file mode 100644 index 0000000000..982a0f2199 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md @@ -0,0 +1,5 @@ +If your enrollment token has expired, then you can generate a new enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool: + +```sh +bin/elasticsearch-create-enrollment-token -s kibana +``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/access-kibana.md b/deploy-manage/deploy/self-managed/access-kibana.md new file mode 100644 index 0000000000..dbe5b4a598 --- /dev/null +++ b/deploy-manage/deploy/self-managed/access-kibana.md @@ -0,0 +1,21 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/kibana/current/access.html +--- + +# Access {{kib}} [access] + +Access {{kib}} through the web application on port 5601. + +1. Point your web browser to the machine where you are running {{kib}} and specify the port number. For example, `localhost:5601` or `http://YOURDOMAIN.com:5601`. + + To remotely connect to {{kib}}, set [server.host](configure.md#server-host) to a non-loopback address. + +2. Log on to your account. +3. Go to the home page, then click **{{kib}}**. +4. To make the {{kib}} page your landing page, click **Make this my landing page**. 
+ +## Resources + +* [Troubleshoot: Check {{kib}} server status](/troubleshoot/kibana/access.md) +* [Troubleshoot: Error: {{kib}} server is not ready yet](/troubleshoot/kibana/error-server-not-ready.md) \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/access.md b/deploy-manage/deploy/self-managed/access.md deleted file mode 100644 index efaed030dc..0000000000 --- a/deploy-manage/deploy/self-managed/access.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/kibana/current/access.html ---- - -# Access [access] - -The fastest way to access {{kib}} is to use our hosted {{es}} Service. If you [installed {{kib}} on your own](install-kibana.md), access {{kib}} through the web application. - - -## Set up on cloud [_set_up_on_cloud] - -There’s no faster way to get started than with {{ecloud}}: - -1. [Get a free trial](https://cloud.elastic.co/registration?page=docs&placement=docs-body). -2. Log into [Elastic Cloud](https://cloud.elastic.co?page=docs&placement=docs-body). -3. Click **Create deployment**. -4. Give your deployment a name. -5. Click **Create deployment** and download the password for the `elastic` user. - -That’s it! Now that you are up and running, it’s time to get some data into {{kib}}. {{kib}} will open as soon as your deployment is ready. - - -## Log on to the web application [log-on-to-the-web-application] - -If you are using a self-managed deployment, access {{kib}} through the web application on port 5601. - -1. Point your web browser to the machine where you are running {{kib}} and specify the port number. For example, `localhost:5601` or `http://YOURDOMAIN.com:5601`. - - To remotely connect to {{kib}}, set [server.host](configure.md#server-host) to a non-loopback address. - -2. Log on to your account. -3. Go to the home page, then click **{{kib}}**. -4. To make the {{kib}} page your landing page, click **Make this my landing page**. 
- -## Resources -* [Troubleshoot: Check {{kib}} server status](/troubleshoot/kibana/access.md) -* [Troubleshoot: Error: {{kib}} server is not ready yet](/troubleshoot/kibana/error-server-not-ready.md) \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/configure-elasticsearch.md b/deploy-manage/deploy/self-managed/configure-elasticsearch.md index f1f1f73fe0..0eacc62959 100644 --- a/deploy-manage/deploy/self-managed/configure-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/configure-elasticsearch.md @@ -1,6 +1,9 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html +applies_to: + deployment: + self: --- # Configure {{es}} [settings] @@ -9,8 +12,14 @@ mapped_pages: The configuration files should contain settings which are node-specific (such as `node.name` and paths), or settings which a node requires in order to be able to join a cluster, such as `cluster.name` and `network.host`. +## Available settings -## Config files location [config-files-location] +For a complete list of settings that you can apply to your {{es}} cluster, refer to the [Elasticsearch configuration reference](elasticsearch://reference/elasticsearch/configuration-reference.md). + +For a list of settings that must be configured before using your cluster in production, refer to [](/deploy-manage/deploy/self-managed/important-settings-configuration.md). + + +## Config files [config-files-location] {{es}} has three configuration files: @@ -20,15 +29,26 @@ The configuration files should contain settings which are node-specific (such as These files are located in the config directory, whose default location depends on whether or not the installation is from an archive distribution (`tar.gz` or `zip`) or a package distribution (Debian or RPM packages). -For the archive distributions, the config directory location defaults to `$ES_HOME/config`. 
The location of the config directory can be changed via the `ES_PATH_CONF` environment variable as follows: +### Archive distributions + +For the archive distributions, the config directory location defaults to `$ES_HOME/config`. The location of the config directory can be changed using the `ES_PATH_CONF` environment variable: ```sh ES_PATH_CONF=/path/to/my/config ./bin/elasticsearch ``` -Alternatively, you can `export` the `ES_PATH_CONF` environment variable via the command line or via your shell profile. +Alternatively, you can `export` the `ES_PATH_CONF` environment variable through the command line or through your shell profile. + +### Package distributions + +For the package distributions, the config directory location defaults to `/etc/elasticsearch`. + +The location of the config directory can be changed by setting the `ES_PATH_CONF` environment variable, however, setting the environment variable in your shell is not sufficient. Instead, this variable is sourced from one the following locations: + +* Debian: `/etc/default/elasticsearch` +* RPM: `/etc/sysconfig/elasticsearch` -For the package distributions, the config directory location defaults to `/etc/elasticsearch`. The location of the config directory can also be changed via the `ES_PATH_CONF` environment variable, but note that setting this in your shell is not sufficient. Instead, this variable is sourced from `/etc/default/elasticsearch` (for the Debian package) and `/etc/sysconfig/elasticsearch` (for the RPM package). You will need to edit the `ES_PATH_CONF=/etc/elasticsearch` entry in one of these files accordingly to change the config directory location. +You need to edit the `ES_PATH_CONF=/etc/elasticsearch` entry in the relevant file for your package to change the config directory location. ## Config file format [_config_file_format] @@ -79,15 +99,13 @@ Values for environment variables must be simple strings. 
Use a comma-separated s export HOSTNAME="host1,host2" ``` - ## Cluster and node setting types [cluster-setting-types] Cluster and node settings can be categorized based on how they are configured: -$$$dynamic-cluster-setting$$$ +### Dynamic [dynamic-cluster-setting] -Dynamic -: You can configure and update dynamic settings on a running cluster using the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). You can also configure dynamic settings locally on an unstarted or shut down node using `elasticsearch.yml`. +You can configure and update dynamic settings on a running cluster using the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). You can also configure dynamic settings locally on an unstarted or shut down node using `elasticsearch.yml`. Updates made using the cluster update settings API can be *persistent*, which apply across cluster restarts, or *transient*, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a `null` value using the API. @@ -100,27 +118,18 @@ If you configure the same setting using multiple methods, {{es}} applies the set For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. -::::{tip} -If you use {{ech}}, use the [user settings](../elastic-cloud/edit-stack-settings.md) feature to configure all cluster settings. This method lets {{ech}} automatically reject unsafe settings that could break your cluster. - -If you run {{es}} on your own hardware, use the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) to configure dynamic cluster settings. 
Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. - -:::: - +Use the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. ::::{warning} We no longer recommend using transient cluster settings. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. - :::: +### Static [static-cluster-setting] -$$$static-cluster-setting$$$ - -Static -: Static settings can only be configured on an unstarted or shut down node using `elasticsearch.yml`. +Static settings can only be configured on an unstarted or shut down node using `elasticsearch.yml`. - Static settings must be set on every relevant node in the cluster. +Static settings must be set on every relevant node in the cluster. diff --git a/deploy-manage/deploy/self-managed/configure-kibana.md b/deploy-manage/deploy/self-managed/configure-kibana.md new file mode 100644 index 0000000000..7481936313 --- /dev/null +++ b/deploy-manage/deploy/self-managed/configure-kibana.md @@ -0,0 +1,29 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/kibana/current/settings.html +--- + +# Configure {{kib}} [settings] + +The {{kib}} server reads properties from the `kibana.yml` file on startup. 
+ +The location of this file differs depending on how you installed {{kib}} + +* **Archive distributions (`.tar.gz` or `.zip`)**: Default location is `$KIBANA_HOME/config` +* **Package distributions (Debian or RPM)**: Default location is `/etc/kibana` + +The config directory can be changed using the `KBN_PATH_CONF` environment variable: + +```text +KBN_PATH_CONF=/home/kibana/config ./bin/kibana +``` + +The default host and port settings configure {{kib}} to run on `localhost:5601`. To change this behavior and allow remote users to connect, you need to update your [`server.host`](kibana://reference/configuration-reference/general-settings.md#server-host) and [`server.port`](kibana://reference/configuration-reference/general-settings.md#server-port) settings in the `kibana.yml` file. + +In this file, you can also enable SSL and set a variety of other options. + +Environment variables can be injected into configuration using `${MY_ENV_VAR}` syntax. By default, configuration validation will fail if an environment variable used in the config file is not present when {{kib}} starts. This behavior can be changed by using a default value for the environment variable, using the `${MY_ENV_VAR:defaultValue}` syntax. + +## Available settings + +For a complete list of settings that you can apply to {{kib}}, refer to [{{kib}} configuration reference](kibana:///reference/configuration-reference.md). \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/configure.md b/deploy-manage/deploy/self-managed/configure.md deleted file mode 100644 index ad13d7ebe4..0000000000 --- a/deploy-manage/deploy/self-managed/configure.md +++ /dev/null @@ -1,527 +0,0 @@ ---- -mapped_pages: - - https://www.elastic.co/guide/en/kibana/current/settings.html ---- - -# Configure [settings] - -The {{kib}} server reads properties from the `kibana.yml` file on startup. The location of this file differs depending on how you installed {{kib}}. 
For example, if you installed {{kib}} from an archive distribution (`.tar.gz` or `.zip`), by default it is in `$KIBANA_HOME/config`. By default, with package distributions (Debian or RPM), it is in `/etc/kibana`. The config directory can be changed via the `KBN_PATH_CONF` environment variable: - -```text -KBN_PATH_CONF=/home/kibana/config ./bin/kibana -``` - -The default host and port settings configure {{kib}} to run on `localhost:5601`. To change this behavior and allow remote users to connect, you’ll need to update your `kibana.yml` file. You can also enable SSL and set a variety of other options. - -Environment variables can be injected into configuration using `${MY_ENV_VAR}` syntax. By default, configuration validation will fail if an environment variable used in the config file is not present when {{kib}} starts. This behavior can be changed by using a default value for the environment variable, using the `${MY_ENV_VAR:defaultValue}` syntax. - -`console.ui.enabled` -: Toggling this causes the server to regenerate assets on the next startup, which may cause a delay before pages start being served. Set to `false` to disable Console. **Default: `true`** - -`csp.script_src` -: Add sources for the [Content Security Policy `script-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src). - -`csp.disableUnsafeEval` -: [8.7.0] Set this to `false` to add the [`unsafe-eval`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src#unsafe_eval_expressions) source expression to the `script-src` directive. **Default: `true`** - - When `csp.disableUnsafeEval` is set to `true`, {{kib}} will use a custom version of the Handlebars template library. Handlebars is used in various locations in the {{kib}} frontend where custom templates can be supplied by the user when for instance setting up a visualisation. 
If you experience any issues rendering Handlebars templates, please set this setting to `false` and [open an issue](https://github.com/elastic/kibana/issues/new/choose) in the {{kib}} GitHub repository. - - -`csp.worker_src` -: Add sources for the [Content Security Policy `worker-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/worker-src). - -`csp.style_src` -: Add sources for the [Content Security Policy `style-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src). - -`csp.connect_src` -: Add sources for the [Content Security Policy `connect-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src). - -`csp.default_src` -: Add sources for the [Content Security Policy `default-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src). - -`csp.font_src` -: Add sources for the [Content Security Policy `font-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src). - -`csp.frame_src` -: Add sources for the [Content Security Policy `frame-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-src). - -`csp.img_src` -: Add sources for the [Content Security Policy `img-src` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src). - -`csp.frame_ancestors` -: Add sources for the [Content Security Policy `frame-ancestors` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors). - - ::::{note} - The `frame-ancestors` directive can also be configured by using [`server.securityResponseHeaders.disableEmbedding`](#server-securityResponseHeaders-disableEmbedding). In that case, that takes precedence and any values in `csp.frame_ancestors` are ignored. 
- :::: - - -`csp.report_only.form_action` -: Add sources for the [Content Security Policy `form-action` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/form-action) in reporting mode. - -`csp.report_uri` -: Add sources for the [Content Security Policy `report-uri` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/report-uri). - -`csp.report_to:` -: Add sources for the [Content Security Policy `report-to` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/report-to). - -$$$csp-strict$$$ `csp.strict` -: Blocks {{kib}} access to any browser that does not enforce even rudimentary CSP rules. In practice, this disables support for older, less safe browsers like Internet Explorer. For more information, refer to [Content Security Policy](../../security/secure-http-communications.md#csp-strict-mode). **Default: `true`** - -`csp.warnLegacyBrowsers` -: Shows a warning message after loading {{kib}} to any browser that does not enforce even rudimentary CSP rules, though {{kib}} is still accessible. This configuration is effectively ignored when [`csp.strict`](#csp-strict) is enabled. **Default: `true`** - -`permissionsPolicy.report_to:` -: Add sources for the [Permissions Policy `report-to` directive](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy). - -$$$elasticsearch-maxSockets$$$ `elasticsearch.maxSockets` -: The maximum number of sockets that can be used for communications with {{es}}. **Default: `Infinity`** - -$$$elasticsearch-maxResponseSize$$$ `elasticsearch.maxResponseSize` -: Either `false` or a `byteSize` value. When set, responses from {{es}} with a size higher than the defined limit will be rejected. This is intended to be used as a circuit-breaker mechanism to avoid memory errors in case of unexpectedly high responses coming from {{es}}. 
**Default: `false`** - -$$$elasticsearch-maxIdleSockets$$$ `elasticsearch.maxIdleSockets` -: The maximum number of idle sockets to keep open between {{kib}} and {{es}}. If more sockets become idle, they will be closed. **Default: `256`** - -$$$elasticsearch-idleSocketTimeout$$$ `elasticsearch.idleSocketTimeout` -: The timeout for idle sockets kept open between {{kib}} and {{es}}. If the socket is idle for longer than this timeout, it will be closed. If you have a transparent proxy between {{kib}} and {{es}} be sure to set this value lower than or equal to the proxy’s timeout. **Default: `60s`** - -`elasticsearch.customHeaders` -: | Header names and values to send to {{es}}. Any custom headers cannot be overwritten by client-side headers, regardless of the [`elasticsearch.requestHeadersWhitelist`](#elasticsearch-requestHeadersWhitelist) configuration. **Default: `{}`** - -$$$elasticsearch-hosts$$$ `elasticsearch.hosts:` -: The URLs of the {{es}} instances to use for all your queries. All nodes listed here must be on the same cluster. **Default: `[ "http://localhost:9200" ]`** - - To enable SSL/TLS for outbound connections to {{es}}, use the `https` protocol in this setting. - - -$$$elasticsearch-publicBaseUrl$$$ `elasticsearch.publicBaseUrl:` -: The URL through which {{es}} is publicly accessible, if any. This will be shown to users in {{kib}} when they need connection details for your {{es}} cluster. - -$$$elasticsearch-pingTimeout$$$ `elasticsearch.pingTimeout` -: Time in milliseconds to wait for {{es}} to respond to pings. **Default: the value of the [`elasticsearch.requestTimeout`](#elasticsearch-requestTimeout) setting** - -$$$elasticsearch-requestHeadersWhitelist$$$ `elasticsearch.requestHeadersWhitelist` -: List of {{kib}} client-side headers to send to {{es}}. To send **no** client-side headers, set this value to [] (an empty list). 
Removing the `authorization` header from being whitelisted means that you cannot use [basic authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/kibana-authentication.md) in {{kib}}. **Default: `[ 'authorization', 'es-client-authentication' ]`** - -$$$elasticsearch-requestTimeout$$$ `elasticsearch.requestTimeout` -: Time in milliseconds to wait for responses from the back end or {{es}}. This value must be a positive integer. **Default: `30000`** - -`elasticsearch.shardTimeout` -: Time in milliseconds for {{es}} to wait for responses from shards. Set to 0 to disable. **Default: `30000`** - -`elasticsearch.compression` -: Specifies whether {{kib}} should use compression for communications with {{es}}. **Default: `false`** - -`elasticsearch.sniffInterval` -: Time in milliseconds between requests to check {{es}} for an updated list of nodes. **Default: `false`** - -`elasticsearch.sniffOnStart` -: Attempt to find other {{es}} nodes on startup. **Default: `false`** - -`elasticsearch.sniffOnConnectionFault` -: Update the list of {{es}} nodes immediately following a connection fault. **Default: `false`** - -$$$elasticsearch-ssl-alwaysPresentCertificate$$$ `elasticsearch.ssl.alwaysPresentCertificate` -: Controls {{kib}} behavior in regard to presenting a client certificate when requested by {{es}}. This setting applies to all outbound SSL/TLS connections to {{es}}, including requests that are proxied for end users. **Default: `false`** - - ::::{warning} - When {{es}} uses certificates to authenticate end users with a PKI realm and [`elasticsearch.ssl.alwaysPresentCertificate`](#elasticsearch-ssl-alwaysPresentCertificate) is `true`, proxied requests may be executed as the identity that is tied to the {{kib}} server. - :::: - - -$$$elasticsearch-ssl-cert-key$$$ `elasticsearch.ssl.certificate` and `elasticsearch.ssl.key` -: Paths to a PEM-encoded X.509 client certificate and its corresponding private key. 
These are used by {{kib}} to authenticate itself when making outbound SSL/TLS connections to {{es}}. For this setting to take effect, the `xpack.security.http.ssl.client_authentication` setting in {{es}} must also be set to `"required"` or `"optional"` to request a client certificate from {{kib}}. - - ::::{note} - These settings cannot be used in conjunction with [`elasticsearch.ssl.keystore.path`](#elasticsearch-ssl-keystore-path). - :::: - - -$$$elasticsearch-ssl-certificateAuthorities$$$ `elasticsearch.ssl.certificateAuthorities` -: Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates, which make up a trusted certificate chain for {{es}}. This chain is used by {{kib}} to establish trust when making outbound SSL/TLS connections to {{es}}. - - In addition to this setting, trusted certificates may be specified via [`elasticsearch.ssl.keystore.path`](#elasticsearch-ssl-keystore-path) and/or [`elasticsearch.ssl.truststore.path`](#elasticsearch-ssl-truststore-path). - - -`elasticsearch.ssl.keyPassphrase` -: The password that decrypts the private key that is specified via [`elasticsearch.ssl.key`](#elasticsearch-ssl-cert-key). This value is optional, as the key may not be encrypted. - -$$$elasticsearch-ssl-keystore-path$$$ `elasticsearch.ssl.keystore.path` -: Path to a PKCS#12 keystore that contains an X.509 client certificate and its corresponding private key. These are used by {{kib}} to authenticate itself when making outbound SSL/TLS connections to {{es}}. For this setting, you must also set the `xpack.security.http.ssl.client_authentication` setting in {{es}} to `"required"` or `"optional"` to request a client certificate from {{kib}}. - - If the keystore contains any additional certificates, they are used as a trusted certificate chain for {{es}}. This chain is used by {{kib}} to establish trust when making outbound SSL/TLS connections to {{es}}. 
In addition to this setting, trusted certificates may be specified via [`elasticsearch.ssl.certificateAuthorities`](#elasticsearch-ssl-certificateAuthorities) and/or [`elasticsearch.ssl.truststore.path`](#elasticsearch-ssl-truststore-path). - - ::::{note} - This setting cannot be used in conjunction with [`elasticsearch.ssl.certificate`](#elasticsearch-ssl-cert-key) or [`elasticsearch.ssl.key`](#elasticsearch-ssl-cert-key). - :::: - - -`elasticsearch.ssl.keystore.password` -: The password that decrypts the keystore specified via [`elasticsearch.ssl.keystore.path`](#elasticsearch-ssl-keystore-path). If the keystore has no password, leave this as blank. If the keystore has an empty password, set this to `""`. - -$$$elasticsearch-ssl-truststore-path$$$ `elasticsearch.ssl.truststore.path` -: Path to a PKCS#12 trust store that contains one or more X.509 certificate authority (CA) certificates, which make up a trusted certificate chain for {{es}}. This chain is used by {{kib}} to establish trust when making outbound SSL/TLS connections to {{es}}. - - In addition to this setting, trusted certificates may be specified via [`elasticsearch.ssl.certificateAuthorities`](#elasticsearch-ssl-certificateAuthorities) and/or [`elasticsearch.ssl.keystore.path`](#elasticsearch-ssl-keystore-path). - - -`elasticsearch.ssl.truststore.password` -: The password that decrypts the trust store specified via [`elasticsearch.ssl.truststore.path`](#elasticsearch-ssl-truststore-path). If the trust store has no password, leave this as blank. If the trust store has an empty password, set this to `""`. - -$$$elasticsearch-ssl-verificationMode$$$ `elasticsearch.ssl.verificationMode` -: Controls the verification of the server certificate that {{kib}} receives when making an outbound SSL/TLS connection to {{es}}. Valid values are `"full"`, `"certificate"`, and `"none"`. 
Using `"full"` performs hostname verification, using `"certificate"` skips hostname verification, and using `"none"` skips verification entirely. **Default: `"full"`** - -$$$elasticsearch-user-passwd$$$ `elasticsearch.username` and `elasticsearch.password` -: If your {{es}} is protected with basic authentication, these settings provide the username and password that the {{kib}} server uses to perform maintenance on the {{kib}} index at startup. {{kib}} users still need to authenticate with {{es}}, which is proxied through the {{kib}} server. - -$$$elasticsearch-service-account-token$$$ `elasticsearch.serviceAccountToken` -: If your {{es}} is protected with basic authentication, this token provides the credentials that the {{kib}} server uses to perform maintenance on the {{kib}} index at startup. This setting is an alternative to `elasticsearch.username` and `elasticsearch.password`. - -`unifiedSearch.autocomplete.valueSuggestions.timeout` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: Time in milliseconds to wait for autocomplete suggestions from {{es}}. This value must be a whole number greater than zero. **Default: `"1000"`** - -`unifiedSearch.autocomplete.valueSuggestions.terminateAfter` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: Maximum number of documents loaded by each shard to generate autocomplete suggestions. This value must be a whole number greater than zero. **Default: `"100000"`** - - ::::{note} - To reload the [logging settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/logging-settings.md), send a SIGHUP signal to {{kib}}. For more logging configuration options, see the [Configure Logging in {{kib}}](../../monitor/logging-configuration/kibana-logging.md) guide. 
- :::: - - -$$$logging-root$$$ `logging.root` -: The `root` logger is a [dedicated logger](../../monitor/logging-configuration/kibana-logging.md#dedicated-loggers) and is pre-configured. The `root` logger logs at `info` level by default. If any other logging configuration is specified, `root` *must* also be explicitly configured. - -$$$logging-root-appenders$$$ `logging.root.appenders` -: A list of logging appenders to forward the root level logger instance to. By default `root` is configured with the `default` appender that logs to stdout with a `pattern` layout. This is the configuration that all custom loggers will use unless they’re re-configured explicitly. You can override the default behavior by configuring a different [appender](../../monitor/logging-configuration/kibana-logging.md#logging-appenders) to apply to `root`. - -$$$logging-root-level$$$ `logging.root.level` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: Level at which a log record should be logged. Supported levels are: *all*, *fatal*, *error*, *warn*, *info*, *debug*, *trace*, *off*. Levels are ordered from *all* (highest) to *off* and a log record will be logged if its level is higher than or equal to the level of its logger, otherwise the log record is ignored. Use this value to [change the overall log level](../../monitor/logging-configuration/kibana-log-settings-examples.md#change-overall-log-level). **Default: `info`**. - - ::::{tip} - Set to `all` to log all events, including system usage information and all requests. Set to `off` to silence all logs. You can also use the logging [cli commands](../../monitor/logging-configuration/kibana-logging-cli-configuration.md#logging-cli-migration) to set log level to `verbose` or silence all logs. 
- :::: - - - The following example shows a valid verbose `logging.root` configuration: - - ```text - logging: - appenders: - console_appender: - type: console - layout: - type: pattern - highlight: true - root: - appenders: [console_appender] - level: all - ``` - - -$$$logging-loggers$$$ `logging.loggers[]` -: Allows you to [customize a specific logger instance](../../monitor/logging-configuration/kibana-log-settings-examples.md#customize-specific-log-records). - -`logging.appenders[]` -: [Appenders](../../monitor/logging-configuration/kibana-logging.md#logging-appenders) define how and where log messages are displayed (eg. **stdout** or console) and stored (eg. file on the disk). - -`map.includeElasticMapsService` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: Set to `false` to disable connections to Elastic Maps Service. When `includeElasticMapsService` is turned off, only tile layer configured by [`map.tilemap.url`](#tilemap-url) is available in [Maps](../../../explore-analyze/visualize/maps.md). **Default: `true`** - -`map.emsUrl` -: Specifies the URL of a self hosted [{{hosted-ems}}](../../../explore-analyze/visualize/maps/maps-connect-to-ems.md#elastic-maps-server) - -$$$tilemap-settings$$$ `map.tilemap.options.attribution` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: The map attribution string. Provide attributions in markdown and use `\|` to delimit attributions, for example: `"[attribution 1](https://www.attribution1)\|[attribution 2](https://www.attribution2)"`. **Default: `"© [Elastic Maps Service](https://www.elastic.co/elastic-maps-service)"`** - -$$$tilemap-max-zoom$$$ `map.tilemap.options.maxZoom` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: The maximum zoom level. 
**Default: `10`** - -$$$tilemap-min-zoom$$$ `map.tilemap.options.minZoom` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: The minimum zoom level. **Default: `1`** - -$$$tilemap-subdomains$$$ `map.tilemap.options.subdomains` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: An array of subdomains used by the tile service. Specify the position of the subdomain in the URL with the token `{s}`. - -$$$tilemap-url$$$ `map.tilemap.url` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: The URL to the service that {{kib}} uses as the default basemap in [maps](../../../explore-analyze/visualize/maps.md) and [vega maps](../../../explore-analyze/visualize/custom-visualizations-with-vega.md#vega-with-a-map). By default, {{kib}} sets a basemap from the [Elastic Maps Service](../../../explore-analyze/visualize/maps/maps-connect-to-ems.md), but users can point to their own Tile Map Service. For example: `"https://tiles.elastic.co/v2/default/{{z}}/{x}/{{y}}.png?elastic_tile_service_tos=agree&my_app_name=kibana"` - -`migrations.batchSize` -: Defines the number of documents migrated at a time. The higher the value, the faster the Saved Objects migration process performs at the cost of higher memory consumption. If upgrade migrations results in {{kib}} crashing with an out of memory exception or fails due to an {{es}} `circuit_breaking_exception`, use a smaller `batchSize` value to reduce the memory pressure. **Default: `1000`** - -`migrations.maxBatchSizeBytes` -: Defines the maximum payload size for indexing batches of upgraded saved objects to avoid migrations failing due to a 413 Request Entity Too Large response from {{es}}. This value should be lower than or equal to your {{es}} cluster’s `http.max_content_length` configuration option. 
**Default: `100mb`** - -`migrations.retryAttempts` -: The number of times migrations retry temporary failures, such as a network timeout, 503 status code, or `snapshot_in_progress_exception`. When upgrade migrations frequently fail after exhausting all retry attempts with a message such as `Unable to complete the [...] step after 15 attempts, terminating.`, increase the setting value. **Default: `15`** - -`newsfeed.enabled` -: Controls whether to enable the newsfeed system for the {{kib}} UI notification center. Set to `false` to disable the newsfeed system. **Default: `true`** - -`node.roles` -: [preview] Indicates which roles to configure the {{kib}} process with, which will effectively run {{kib}} in different modes. Valid options are `background_tasks` and `ui`, or `*` to select all roles. **Default: `*`** - -`notifications.connectors.default.email` -: Choose the default email connector for user notifications. As of `8.6.0`, {{kib}} is shipping with a new notification mechanism that will send email notifications for various user actions, e.g. assigning a *Case* to a user. To enable notifications, an email connector must be [preconfigured](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/pre-configured-connectors.md) in the system via `kibana.yml`, and the notifications plugin must be configured to point to the ID of that connector. - -$$$path-data$$$ `path.data` -: The path where {{kib}} stores persistent data not saved in {{es}}. **Default: `data`** - -`pid.file` -: Specifies the path where {{kib}} creates the process ID file. - -`ops.interval` -: Set the interval in milliseconds to sample system and process performance metrics. The minimum value is 100. **Default: `5000`** - -$$$ops-cGroupOverrides-cpuPath$$$ `ops.cGroupOverrides.cpuPath` -: Override for cgroup cpu path when mounted in a manner that is inconsistent with `/proc/self/cgroup`. 
- -$$$ops-cGroupOverrides-cpuAcctPath$$$ `ops.cGroupOverrides.cpuAcctPath` -: Override for cgroup cpuacct path when mounted in a manner that is inconsistent with `/proc/self/cgroup`. - -$$$savedObjects-maxImportExportSize$$$ `savedObjects.maxImportExportSize` -: The maximum count of saved objects that can be imported or exported. This setting exists to prevent the {{kib}} server from running out of memory when handling large numbers of saved objects. It is recommended to only raise this setting if you are confident your server can hold this many objects in memory. **Default: `10000`** - -$$$savedObjects-maxImportPayloadBytes$$$ `savedObjects.maxImportPayloadBytes` -: The maximum byte size of a saved objects import that the {{kib}} server will accept. This setting exists to prevent the {{kib}} server from running out of memory when handling a large import payload. Note that this setting overrides the more general [`server.maxPayload`](#server-maxPayload) for saved object imports only. **Default: `26214400`** - -$$$server-basePath$$$ `server.basePath` -: Enables you to specify a path to mount {{kib}} at if you are running behind a proxy. Use the [`server.rewriteBasePath`](#server-rewriteBasePath) setting to tell {{kib}} if it should remove the basePath from requests it receives, and to prevent a deprecation warning at startup. This setting cannot end in a slash (`/`). - -$$$server-publicBaseUrl$$$ `server.publicBaseUrl` -: The publicly available URL that end-users access {{kib}} at. Must include the protocol, hostname, port (if different than the defaults for `http` and `https`, 80 and 443 respectively), and the [`server.basePath`](#server-basePath) (when that setting is configured explicitly). This setting cannot end in a slash (`/`). - -$$$server-compression$$$ `server.compression.enabled` -: Set to `false` to disable HTTP compression for all responses. **Default: `true`** - -`server.cors.enabled` -: [preview] Set to `true` to allow cross-origin API calls. 
**Default:** `false` - -`server.cors.allowCredentials` -: [preview] Set to `true` to allow browser code to access response body whenever request performed with user credentials. **Default:** `false` - -`server.cors.allowOrigin` -: experimental::[] List of origins permitted to access resources. You must specify explicit hostnames and not use `server.cors.allowOrigin: ["*"]` when `server.cors.allowCredentials: true`. **Default:** ["*"] - -`server.compression.referrerWhitelist` -: Specifies an array of trusted hostnames, such as the {{kib}} host, or a reverse proxy sitting in front of it. This determines whether HTTP compression may be used for responses, based on the request `Referer` header. This setting may not be used when [`server.compression.enabled`](#server-compression) is set to `false`. **Default: `none`** - -`server.compression.brotli.enabled` -: Set to `true` to enable brotli (br) compression format. Note: browsers not supporting brotli compression will fallback to using gzip instead. This setting may not be used when [`server.compression.enabled`](#server-compression) is set to `false`. **Default: `false`** - -$$$server-securityResponseHeaders-strictTransportSecurity$$$ `server.securityResponseHeaders.strictTransportSecurity` -: Controls whether the [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header is used in all responses to the client from the {{kib}} server, and specifies what value is used. Allowed values are any text value or `null`. To disable, set to `null`. **Default:** `null` - -$$$server-securityResponseHeaders-xContentTypeOptions$$$ `server.securityResponseHeaders.xContentTypeOptions` -: Controls whether the [`X-Content-Type-Options`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options) header is used in all responses to the client from the {{kib}} server, and specifies what value is used. Allowed values are `nosniff` or `null`. 
To disable, set to `null`. **Default:** `"nosniff"` - -$$$server-securityResponseHeaders-referrerPolicy$$$ `server.securityResponseHeaders.referrerPolicy` -: Controls whether the [`Referrer-Policy`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy) header is used in all responses to the client from the {{kib}} server, and specifies what value is used. Allowed values are `no-referrer`, `no-referrer-when-downgrade`, `origin`, `origin-when-cross-origin`, `same-origin`, `strict-origin`, `strict-origin-when-cross-origin`, `unsafe-url`, or `null`. To disable, set to `null`. **Default:** `"strict-origin-when-cross-origin"` - -$$$server-securityResponseHeaders-permissionsPolicy$$$ `server.securityResponseHeaders.permissionsPolicy` -: [preview] Controls whether the [`Permissions-Policy`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy) header is used in all responses to the client from the {{kib}} server, and specifies what value is used. Allowed values are any text value or `null`. Refer to the [`Permissions-Policy` documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy) for defined directives, values, and text format. To disable, set to `null`. **Default:** `camera=(), display-capture=(), fullscreen=(self), geolocation=(), microphone=(), web-share=()` - -$$$server-securityResponseHeaders-permissionsPolicyReportOnly$$$ `server.securityResponseHeaders.permissionsPolicyReportOnly` -: [preview] Controls whether the [`Permissions-Policy-Report-Only`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy) header is used in all responses to the client from the {{kib}} server, and specifies what value is used. Allowed values are any text value or `null`. Refer to the [`Permissions-Policy` documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy) for defined directives, values, and text format. 
- -$$$server-securityResponseHeaders-disableEmbedding$$$`server.securityResponseHeaders.disableEmbedding` -: Controls whether the [`Content-Security-Policy`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy) and [`X-Frame-Options`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options) headers are configured to disable embedding {{kib}} in other webpages using iframes. When set to `true`, secure headers are used to disable embedding, which adds the `frame-ancestors: 'self'` directive to the `Content-Security-Policy` response header and adds the `X-Frame-Options: SAMEORIGIN` response header. **Default:** `false` - -$$$server-securityResponseHeaders-crossOriginOpenerPolicy$$$ `server.securityResponseHeaders.crossOriginOpenerPolicy` -: Controls whether the [`Cross-Origin-Opener-Policy`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cross-Origin-Opener-Policy) header is used in all responses to the client from the {{kib}} server, and specifies what value is used. Allowed values are `unsafe-none`, `same-origin-allow-popups`, `same-origin`, or `null`. To disable, set to `null`. **Default:** `"same-origin"` - -`server.customResponseHeaders` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: Header names and values to send on all responses to the client from the {{kib}} server. **Default: `{}`** - -$$$server-shutdownTimeout$$$ `server.shutdownTimeout` -: Sets the grace period for {{kib}} to attempt to resolve any ongoing HTTP requests after receiving a `SIGTERM`/`SIGINT` signal, and before shutting down. Any new HTTP requests received during this period are rejected, because the incoming socket is closed without further processing. **Default: `30s`** - -$$$server-host$$$ `server.host` -: This setting specifies the host of the back end server. To allow remote users to connect, set the value to the IP address or DNS name of the {{kib}} server. 
Use `0.0.0.0` to make {{kib}} listen on all IPs (public and private). **Default: `"localhost"`** - -`server.keepaliveTimeout` -: The number of milliseconds to wait for additional data before restarting the [`server.socketTimeout`](#server-socketTimeout) counter. **Default: `"120000"`** - -$$$server-maxPayload$$$ `server.maxPayload` -: The maximum payload size in bytes for incoming server requests. **Default: `1048576`** - -`server.name` -: A human-readable display name that identifies this {{kib}} instance. **Default: `"your-hostname"`** - -$$$server-port$$$ `server.port` -: {{kib}} is served by a back end server. This setting specifies the port to use. **Default: `5601`** - -$$$server-protocol$$$ `server.protocol` -: [preview] The http protocol to use, either `http1` or `http2`. Set to `http1` to opt out of `HTTP/2` support when TLS is enabled. Use of `http1` may impact browser loading performance especially for dashboards with many panels. **Default**: `http2` if TLS is enabled, otherwise `http1`. - - ::::{note} - By default, enabling `http2` requires a valid `h2c` configuration, meaning that TLS must be enabled via [`server.ssl.enabled`](#server-ssl-enabled) and [`server.ssl.supportedProtocols`](#server-ssl-supportedProtocols), if specified, must contain at least `TLSv1.2` or `TLSv1.3`. Strict validation of the `h2c` setup can be disabled by adding `server.http2.allowUnsecure: true` to the configuration. - :::: - - -$$$server-rate-limiter-enabled$$$ `server.rateLimiter.enabled` -: Enables rate-limiting of requests to the {{kib}} server based on Node.js' Event Loop Utilization. If the average event loop utilization for the specified term exceeds the configured threshold, the server will respond with a `429 Too Many Requests` status code. - - This functionality should be used carefully as it may impact the server’s availability. 
The configuration options vary per environment, so it is recommended to enable this option in a testing environment first, adjust the rate-limiter configuration, and then roll it out to production. - - **Default: `false`** - - -`server.rateLimiter.elu` -: The Event Loop Utilization (ELU) threshold for rate-limiting requests to the {{kib}} server. The ELU is a value between 0 and 1, representing the average event loop utilization over the specified term. If the average ELU exceeds this threshold, the server will respond with a `429 Too Many Requests` status code. - - In a multi-instance environment with autoscaling, this value is usually between 0.6 and 0.8 to give the autoscaler enough time to react. This value can be higher in a single-instance environment but should not exceed 1.0. In general, the lower the value, the more aggressive the rate limiting. And the highest possible option should be used to prevent the {{kib}} server from being terminated. - - -`server.rateLimiter.term` -: This value is one of `short`, `medium`, or `long`, representing the term over which the average event loop utilization is calculated. It uses exponential moving averages (EMA) to smooth out the utilization values. Each term corresponds to `15s`, `30s`, and `60s`, respectively. - - The term value also changes the way the rate limiter sees the trend in the load: - - * `short`: `elu.short > server.rateLimiter.term`; - * `medium`: `elu.short > server.rateLimiter.elu AND elu.medium > server.rateLimiter.elu`; - * `long`: `elu.short > server.rateLimiter.elu AND elu.medium > server.rateLimiter.elu AND elu.long > server.rateLimiter.elu`. - - This behavior prevents requests from being throttled if the load starts decreasing. In general, the shorter the term, the more aggressive the rate limiting. 
In the multi-instance environment, the `medium` term makes the most sense as it gives the {{kib}} server enough time to spin up a new instance and prevents the existing instances from being terminated. - - -$$$server-requestId-allowFromAnyIp$$$ `server.requestId.allowFromAnyIp` -: Sets whether or not the `X-Opaque-Id` header should be trusted from any IP address for identifying requests in logs and forwarded to {{es}}. - -`server.requestId.ipAllowlist` -: A list of IPv4 and IPv6 addresses which the `X-Opaque-Id` header should be trusted from. Normally this would be set to the IP addresses of the load balancers or reverse-proxy that end users use to access Kibana. If any are set, [`server.requestId.allowFromAnyIp`](#server-requestId-allowFromAnyIp) must also be set to `false`. - -$$$server-rewriteBasePath$$$ `server.rewriteBasePath` -: Specifies whether {{kib}} should rewrite requests that are prefixed with [`server.basePath`](#server-basePath) or require that they are rewritten by your reverse proxy. **Default: `false`** - -$$$server-socketTimeout$$$ `server.socketTimeout` -: The number of milliseconds to wait before closing an inactive socket. **Default: `"120000"`** - -$$$server-payloadTimeout$$$ `server.payloadTimeout` -: Sets the maximum time allowed for the client to transmit the request payload (body) before giving up and responding with a Request Timeout (408) error response. **Default: `"20000"`** - -$$$server-ssl-cert-key$$$ `server.ssl.certificate` and `server.ssl.key` -: Paths to a PEM-encoded X.509 server certificate and its corresponding private key. These are used by {{kib}} to establish trust when receiving inbound SSL/TLS connections from users. - - ::::{note} - These settings cannot be used in conjunction with [`server.ssl.keystore.path`](#server-ssl-keystore-path). 
- :::: - - -$$$server-ssl-certificateAuthorities$$$ `server.ssl.certificateAuthorities` -: Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates which make up a trusted certificate chain for {{kib}}. This chain is used by {{kib}} to establish trust when receiving inbound SSL/TLS connections from end users. If PKI authentication is enabled, this chain is also used by {{kib}} to verify client certificates from end users. - - In addition to this setting, trusted certificates may be specified via [`server.ssl.keystore.path`](#server-ssl-keystore-path) and/or [`server.ssl.truststore.path`](#server-ssl-truststore-path). - - -$$$server-ssl-cipherSuites$$$ `server.ssl.cipherSuites` -: Details on the format, and the valid options, are available via the [OpenSSL cipher list format documentation](https://www.openssl.org/docs/man1.1.1/man1/ciphers.html#CIPHER-LIST-FORMAT). **Default: `TLS_AES_256_GCM_SHA384 TLS_CHACHA20_POLY1305_SHA256 TLS_AES_128_GCM_SHA256 ECDHE-RSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-GCM-SHA384, DHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-SHA384, DHE-RSA-AES256-SHA384, ECDHE-RSA-AES256-SHA256, DHE-RSA-AES256-SHA256, HIGH,!aNULL, !eNULL, !EXPORT, !DES, !RC4, !MD5, !PSK, !SRP, !CAMELLIA`**. - -`server.ssl.clientAuthentication` -: Controls the behavior in {{kib}} for requesting a certificate from client connections. Valid values are `"required"`, `"optional"`, and `"none"`. Using `"required"` will refuse to establish the connection unless a client presents a certificate, using `"optional"` will allow a client to present a certificate if it has one, and using `"none"` will prevent a client from presenting a certificate. **Default: `"none"`** - -$$$server-ssl-enabled$$$ `server.ssl.enabled` -: Enables SSL/TLS for inbound connections to {{kib}}. When set to `true`, a certificate and its corresponding private key must be provided. 
These can be specified via [`server.ssl.keystore.path`](#server-ssl-keystore-path) or the combination of [`server.ssl.certificate`](#server-ssl-cert-key) and [`server.ssl.key`](#server-ssl-cert-key). **Default: `false`** - -`server.ssl.keyPassphrase` -: The password that decrypts the private key that is specified via [`server.ssl.key`](#server-ssl-cert-key). This value is optional, as the key may not be encrypted. - -$$$server-ssl-keystore-path$$$ `server.ssl.keystore.path` -: Path to a PKCS#12 keystore that contains an X.509 server certificate and its corresponding private key. If the keystore contains any additional certificates, those will be used as a trusted certificate chain for {{kib}}. All of these are used by {{kib}} to establish trust when receiving inbound SSL/TLS connections from end users. The certificate chain is also used by {{kib}} to verify client certificates from end users when PKI authentication is enabled. - - In addition to this setting, trusted certificates may be specified via [`server.ssl.certificateAuthorities`](#server-ssl-certificateAuthorities) and/or [`server.ssl.truststore.path`](#server-ssl-truststore-path). - - ::::{note} - This setting cannot be used in conjunction with [`server.ssl.certificate`](#server-ssl-cert-key) or [`server.ssl.key`](#server-ssl-cert-key) - :::: - - -`server.ssl.keystore.password` -: The password that will be used to decrypt the keystore specified via [`server.ssl.keystore.path`](#server-ssl-keystore-path). If the keystore has no password, leave this unset. If the keystore has an empty password, set this to `""`. - -$$$server-ssl-truststore-path$$$ `server.ssl.truststore.path` -: Path to a PKCS#12 trust store that contains one or more X.509 certificate authority (CA) certificates which make up a trusted certificate chain for {{kib}}. This chain is used by {{kib}} to establish trust when receiving inbound SSL/TLS connections from end users. 
If PKI authentication is enabled, this chain is also used by {{kib}} to verify client certificates from end users. - - In addition to this setting, trusted certificates may be specified via [`server.ssl.certificateAuthorities`](#server-ssl-certificateAuthorities) and/or [`server.ssl.keystore.path`](#server-ssl-keystore-path). - - -`server.ssl.truststore.password` -: The password that will be used to decrypt the trust store specified via [`server.ssl.truststore.path`](#server-ssl-truststore-path). If the trust store has no password, leave this unset. If the trust store has an empty password, set this to `""`. - -`server.ssl.redirectHttpFromPort` -: {{kib}} binds to this port and redirects all http requests to https over the port configured as [`server.port`](#server-port). - -$$$server-ssl-supportedProtocols$$$ `server.ssl.supportedProtocols` -: An array of supported protocols with versions. Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`. **Default: TLSv1.2, TLSv1.3** Enabling `TLSv1.1` would require both setting the `--tls-min-1.1` option in the `node.options` configuration and adding `TLSv1.1` to `server.ssl.supportedProtocols`. `HTTP/2` requires the use of minimum `TLSv1.2` for secure connections. - -$$$server-uuid$$$ `server.uuid` -: The unique identifier for this {{kib}} instance. It must be a valid UUIDv4. It gets automatically generated on the first startup if not specified and persisted in the `data` path. - -$$$settings-xsrf-allowlist$$$ `server.xsrf.allowlist` -: It is not recommended to disable protections for arbitrary API endpoints. Instead, supply the `kbn-xsrf` header. The [`server.xsrf.allowlist`](#settings-xsrf-allowlist) setting requires the following format: - - ```text - *Default: [ ]* An array of API endpoints which should be exempt from Cross-Site Request Forgery ("XSRF") protections. 
- ``` - - -$$$settings-xsrf-disableProtection$$$ `server.xsrf.disableProtection` -: Setting this to `true` will completely disable Cross-site request forgery protection in Kibana. This is not recommended. **Default: `false`** - -`status.allowAnonymous` -: If authentication is enabled, setting this to `true` enables unauthenticated users to access the {{kib}} server status API and status page. **Default: `false`** - -$$$telemetry-allowChangingOptInStatus$$$ `telemetry.allowChangingOptInStatus` -: When `false`, users cannot change the opt-in status through [Advanced Settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md), and {{kib}} only looks at the value of [`telemetry.optIn`](#settings-telemetry-optIn) to determine whether to send telemetry data or not. **Default: `true`**. - -$$$settings-telemetry-optIn$$$ `telemetry.optIn` -: Set to `false` to stop sending any telemetry data to Elastic. Reporting your cluster statistics helps us improve your user experience. When `false`, the telemetry data is never sent to Elastic.
- - This setting can be changed at any time in [Advanced Settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md). To prevent users from changing it, set [`telemetry.allowChangingOptInStatus`](#telemetry-allowChangingOptInStatus) to `false`. **Default: `true`** - - -`vis_type_vega.enableExternalUrls` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: Set this value to true to allow Vega to use any URL to access external data sources and images. When false, Vega can only get data from {{es}}. **Default: `false`** - -`xpack.ccr.ui.enabled` -: Set this value to false to disable the Cross-Cluster Replication UI. **Default: `true`** - -$$$settings-explore-data-in-context$$$ `xpack.discoverEnhanced.actions.exploreDataInContextMenu.enabled` -: Enables the **Explore underlying data** option that allows you to open **Discover** from a dashboard panel and view the panel data. **Default: `false`** - - When you create visualizations using the **Lens** drag-and-drop editor, you can use the toolbar to open and explore your data in **Discover**. For more information, check out [Explore the data in Discover](../../../explore-analyze/visualize/lens.md#explore-lens-data-in-discover). - - -$$$settings-explore-data-in-chart$$$ `xpack.discoverEnhanced.actions.exploreDataInChart.enabled` -: Enables you to view the underlying documents in a data series from a dashboard panel. **Default: `false`** - -`xpack.ilm.ui.enabled` -: Set this value to false to disable the Index Lifecycle Policies UI. **Default: `true`** - -`xpack.index_management.ui.enabled` -: Set this value to false to disable the Index Management UI. **Default: `true`** - -`xpack.license_management.ui.enabled` -: Set this value to false to disable the License Management UI. **Default: `true`** - -`xpack.remote_clusters.ui.enabled` -: Set this value to false to disable the Remote Clusters UI. 
**Default: `true`** - -`xpack.rollup.ui.enabled` -: Set this value to false to disable the Rollup Jobs UI. **Default: true** - - ::::{admonition} Deprecated in 8.11.0. - :class: warning - - Rollups are deprecated and will be removed in a future version. Use [downsampling](../../../manage-data/data-store/data-streams/downsampling-time-series-data-stream.md) instead. - :::: - - -`xpack.snapshot_restore.ui.enabled` -: Set this value to false to disable the Snapshot and Restore UI. **Default: true** - -`xpack.upgrade_assistant.ui.enabled` -: Set this value to false to disable the Upgrade Assistant UI. **Default: true** - -`i18n.locale` ![logo cloud](https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg "Supported on {{ech}}") -: Set this value to change the {{kib}} interface language. Valid locales are: `en`, `zh-CN`, `ja-JP`, `fr-FR`. **Default: `en`** \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/important-settings-configuration.md b/deploy-manage/deploy/self-managed/important-settings-configuration.md index eaf563dc56..1c5c238e1d 100644 --- a/deploy-manage/deploy/self-managed/important-settings-configuration.md +++ b/deploy-manage/deploy/self-managed/important-settings-configuration.md @@ -1,6 +1,9 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html +applies_to: + deployment: + self: --- # Important settings configuration [important-settings] @@ -19,9 +22,6 @@ mapped_pages: * [JVM fatal error log setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#error-file-path) * [Cluster backups](#important-settings-backups) -Our [{{ecloud}}](https://cloud.elastic.co/registration?page=docs&placement=docs-body) service configures these items automatically, making your cluster production-ready by default. - - ## Path settings [path-settings] {{es}} writes the data you index to indices and data streams to a `data` directory. 
{{es}} writes its own application logs, which contain information about cluster health and operations, to a `logs` directory. @@ -54,14 +54,16 @@ path: ``` :::::: +{{es}} offers a deprecated setting that allows you to specify multiple paths in `path.data`. To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/path.md#multiple-data-paths). + ::::::: ::::{warning} -Don’t modify anything within the data directory or run processes that might interfere with its contents. If something other than {{es}} modifies the contents of the data directory, then {{es}} may fail, reporting corruption or other data inconsistencies, or may appear to work correctly having silently lost some of your data. Don’t attempt to take filesystem backups of the data directory; there is no supported way to restore such a backup. Instead, use [Snapshot and restore](../../tools/snapshot-and-restore.md) to take backups safely. Don’t run virus scanners on the data directory. A virus scanner can prevent {{es}} from working correctly and may modify the contents of the data directory. The data directory contains no executables so a virus scan will only find false positives. -:::: - - -{{es}} offers a deprecated setting that allows you to specify multiple paths in `path.data`. To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/path.md#multiple-data-paths). +* Don’t modify anything within the data directory or run processes that might interfere with its contents. + If something other than {{es}} modifies the contents of the data directory, then {{es}} may fail, reporting corruption or other data inconsistencies, or may appear to work correctly having silently lost some of your data. 
+* Don’t attempt to take filesystem backups of the data directory; there is no supported way to restore such a backup. Instead, use [Snapshot and restore](../../tools/snapshot-and-restore.md) to take backups safely. +* Don’t run virus scanners on the data directory. A virus scanner can prevent {{es}} from working correctly and may modify the contents of the data directory. The data directory contains no executables so a virus scan will only find false positives. +:::: ## Cluster name setting [_cluster_name_setting] @@ -107,14 +109,14 @@ When you provide a value for `network.host`, {{es}} assumes that you are moving ## Discovery and cluster formation settings [discovery-settings] -Configure two important discovery and cluster formation settings before going to production so that nodes in the cluster can discover each other and elect a master node. +Configure two important discovery and cluster formation settings before going to production so that nodes in the cluster can [discover](/deploy-manage/distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md) each other and elect a master node. ### `discovery.seed_hosts` [unicast.hosts] Out of the box, without any network configuration, {{es}} will bind to the available loopback addresses and scan local ports `9300` to `9305` to connect with other nodes running on the same server. This behavior provides an auto-clustering experience without having to do any configuration. -When you want to form a cluster with nodes on other hosts, use the [static](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#static-cluster-setting) `discovery.seed_hosts` setting. This setting provides a list of other nodes in the cluster that are master-eligible and likely to be live and contactable to seed the [discovery process](../../distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md). 
This setting accepts a YAML sequence or array of the addresses of all the master-eligible nodes in the cluster. Each address can be either an IP address or a hostname that resolves to one or more IP addresses via DNS. +When you want to form a cluster with nodes on other hosts, use the [static](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#static-cluster-setting) `discovery.seed_hosts` setting. This setting provides a list of other nodes in the cluster that are master-eligible and likely to be live and contactable to seed the [discovery process](/deploy-manage/distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md). This setting accepts a YAML sequence or array of the addresses of all the master-eligible nodes in the cluster. Each address can be either an IP address or a hostname that resolves to one or more IP addresses via DNS. ```yaml discovery.seed_hosts: @@ -191,7 +193,7 @@ To see further options not contained in the original JEP, see [Enable Logging wi Change the default GC log output location to `/opt/my-app/gc.log` by creating `$ES_HOME/config/jvm.options.d/gc.options` with some sample options: -```shell +```sh # Turn off all previous logging configuratons -Xlog:disable @@ -230,9 +232,9 @@ These are logs produced by the JVM when it encounters a fatal error, such as a s ## Cluster backups [important-settings-backups] -In a disaster, [snapshots](../../tools/snapshot-and-restore.md) can prevent permanent data loss. [{{slm-cap}}](../../tools/snapshot-and-restore/create-snapshots.md#automate-snapshots-slm) is the easiest way to take regular backups of your cluster. For more information, see [*Create a snapshot*](../../tools/snapshot-and-restore/create-snapshots.md). +In a disaster, [snapshots](../../tools/snapshot-and-restore.md) can prevent permanent data loss. [{{slm-cap}}](../../tools/snapshot-and-restore/create-snapshots.md#automate-snapshots-slm) is the easiest way to take regular backups of your cluster. 
For more information, see [](../../tools/snapshot-and-restore/create-snapshots.md). ::::{warning} -**Taking a snapshot is the only reliable and supported way to back up a cluster.** You cannot back up an {{es}} cluster by making copies of the data directories of its nodes. There are no supported methods to restore any data from a filesystem-level backup. If you try to restore a cluster from such a backup, it may fail with reports of corruption or missing files or other data inconsistencies, or it may appear to have succeeded having silently lost some of your data. +**Taking a snapshot is the only reliable and supported way to back up a cluster.** You cannot back up an {{es}} cluster by making copies of the data directories of its nodes. There are no supported methods to restore any data from a file system-level backup. If you try to restore a cluster from such a backup, it may fail with reports of corruption or missing files or other data inconsistencies, or it may appear to have succeeded having silently lost some of your data. 
:::: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index 602ff526ae..f33d08a609 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -120,12 +120,10 @@ sudo dpkg -i elasticsearch-{{stack-version}}-amd64.deb ## Step 5: Run {{es}} with `systemd` [running-systemd] -:::{include} _snippets/systemd.md +:::{include} _snippets/systemd-startup.md ::: -### Start {{es}} automatically - -:::{include} _snippets/systemd-startup.md +:::{include} _snippets/systemd.md ::: ### Log to the systemd journal diff --git a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md similarity index 61% rename from deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md rename to deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md index 9d4f98098d..8d3fdb4079 100644 --- a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md @@ -4,25 +4,30 @@ mapped_pages: - https://www.elastic.co/guide/en/kibana/current/targz.html sub: stack-version: "9.0.0" +navigation_title: "Linux and MacOS" +applies_to: + deployment: + self: --- - - -# Install from archive on Linux or macOS [targz] +# Install {{kib}} from archive on Linux or macOS [targz] {{kib}} is provided for Linux and Darwin as a `.tar.gz` package. These packages are the easiest formats to use when trying out Kibana. -This package contains both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. 
+:::{include} _snippets/trial.md +::: -The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). +:::{include} _snippets/kib-releases.md +::: ::::{note} macOS is supported for development purposes only and is not covered under the support SLA for [production-supported operating systems](https://www.elastic.co/support/matrix#kibana). :::: +## Step 1: Download and install the archive -## Download and install the Linux 64-bit package [install-linux64] +### Linux [install-linux64] The Linux archive for {{kib}} {{stack-version}} can be downloaded and installed as follows: @@ -36,7 +41,20 @@ cd kibana-{{stack-version}}/ <2> 1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana-{{stack-version}}-linux-x86_64.tar.gz: OK`. 2. This directory is known as `$KIBANA_HOME`. -## Download and install the Darwin package [install-darwin64] +## MacOS [install-darwin64] + +The Darwin archive for {{kib}} {{stack-version}} can be downloaded and installed as follows: + +```sh +curl -O https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz +curl https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> +tar -xzf kibana-{{stack-version}}-darwin-x86_64.tar.gz +cd kibana-{{stack-version}}/ <2> +``` + +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana-{{stack-version}}-darwin-x86_64.tar.gz: OK`. +2. This directory is known as `$KIBANA_HOME`. 
+ ::::{admonition} macOS Gatekeeper warnings :class: important @@ -53,53 +71,31 @@ Alternatively, you can add a security override if a Gatekeeper popup appears by :::: -The Darwin archive for {{kib}} {{stack-version}} can be downloaded and installed as follows: +## Step 2: Start {{es}} and generate an enrollment token for {{kib}} [targz-enroll] -```sh -curl -O https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz -curl https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> -tar -xzf kibana-{{stack-version}}-darwin-x86_64.tar.gz -cd kibana-{{stack-version}}/ <2> -``` - -1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana-{{stack-version}}-darwin-x86_64.tar.gz: OK`. -2. This directory is known as `$KIBANA_HOME`. +[Start {{es}}](/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md). -## Start {{es}} and generate an enrollment token for {{kib}} [targz-enroll] +:::{include} _snippets/auto-security-config.md +::: -When you start {{es}} for the first time, the following security configuration occurs automatically: +:::{include} _snippets/new-enrollment-token.md +::: -* [Certificates and keys](installing-elasticsearch.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. -* The TLS configuration settings are written to `elasticsearch.yml`. -* A password is generated for the `elastic` user. -* An enrollment token is generated for {{kib}}. - -You can then start {{kib}} and enter the enrollment token to securely connect {{kib}} with {{es}}. The enrollment token is valid for 30 minutes. 
- -## Run {{kib}} from the command line [targz-running] +## Step 3: Run {{kib}} from the command line [targz-running] {{kib}} can be started from the command line as follows: ```sh ./bin/kibana ``` +By default, {{kib}} runs in the foreground, prints its logs to the standard output (`stdout`), and can be stopped by pressing `Ctrl`+`C`. -By default, {{kib}} runs in the foreground, prints its logs to the standard output (`stdout`), and can be stopped by pressing **Ctrl-C**. - -If this is the first time you’re starting {{kib}}, this command generates a unique link in your terminal to enroll your {{kib}} instance with {{es}}. - -1. In your terminal, click the generated link to open {{kib}} in your browser. -2. In your browser, paste the enrollment token that was generated in the terminal when you started {{es}}, and then click the button to connect your {{kib}} instance with {{es}}. -3. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. - -::::{note} -If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. - -:::: +:::{include} _snippets/enroll-steps.md +::: -## Configure {{kib}} via the config file [targz-configuring] +## Step 4: Configure {{kib}} using the config file [targz-configuring] -{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). +{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. 
The format of this config file is explained in [](configure.md). ## Directory layout of `.tar.gz` archives [targz-layout] diff --git a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md new file mode 100644 index 0000000000..ae6fb78ae3 --- /dev/null +++ b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md @@ -0,0 +1,71 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/kibana/current/windows.html +sub: + stack-version: "9.0.0" +navigation_title: "Windows" +applies_to: + deployment: + self: +--- + +# Install {{kib}} on Windows [windows] + +{{kib}} can be installed on Windows using the `.zip` package. + +:::{include} _snippets/trial.md +::: + +:::{include} _snippets/kib-releases.md +::: + +## Step 1: Download and install the `.zip` package [install-windows] + +Download the .zip windows archive for {{kib}} {{stack-version}} from [https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-windows-x86_64.zip](https://artifacts.elastic.co/downloads/kibana/kibana-9.0.0-windows-x86_64.zip) + +Unzip it with your favorite unzip tool. This will create a folder called kibana-{{stack-version}}-windows-x86_64, which we will refer to as `$KIBANA_HOME`. In a terminal window, CD to the `$KIBANA_HOME` directory, for instance: + +```sh +CD c:\kibana-{{stack-version}}-windows-x86_64 +``` + +## Step 2: Start {{es}} and generate an enrollment token for {{kib}} [windows-enroll] + +[Start {{es}}](/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md). 
+ +:::{include} _snippets/auto-security-config.md +::: + +:::{include} _snippets/new-enrollment-token.md +::: + +## Step 3: Run {{kib}} from the command line [windows-running] + +{{kib}} can be started from the command line as follows: + +```sh +.\bin\kibana.bat +``` + +By default, {{kib}} runs in the foreground, prints its logs to `STDOUT`, and can be stopped by pressing `Ctrl`+`C`. + +:::{include} _snippets/enroll-steps.md +::: + +## Step 4: Configure {{kib}} using the config file [windows-configuring] + +{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [](configure.md). + +## Directory layout of `.zip` archive [windows-layout] + +The `.zip` package is entirely self-contained. All files and directories are, by default, contained within `$KIBANA_HOME` — the directory created when unpacking the archive. + +This is very convenient because you don’t have to create any directories to start using Kibana, and uninstalling {{kib}} is as easy as removing the `$KIBANA_HOME` directory. However, it is advisable to change the default locations of the config and data directories so that you do not delete important data later on. + +| Type | Description | Default Location | Setting | +| --- | --- | --- | --- | +| home | {{kib}} home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | +| bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | +| config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | +| | data | `The location of the data files written to disk by {{kib}} and its plugins` | `$KIBANA_HOME\data` | +| | plugins | `Plugin files location. 
Each plugin will be contained in a subdirectory.` | `$KIBANA_HOME\plugins` | diff --git a/deploy-manage/deploy/self-managed/install-with-debian-package.md b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md similarity index 59% rename from deploy-manage/deploy/self-managed/install-with-debian-package.md rename to deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md index 43e1961e0f..66b7cf91de 100644 --- a/deploy-manage/deploy/self-managed/install-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md @@ -4,20 +4,26 @@ mapped_pages: - https://www.elastic.co/guide/en/kibana/current/deb.html sub: stack-version: "9.0.0" +navigation_title: "Debian" +applies_to: + deployment: + self: --- -# Install with Debian package [deb] +# Install {{kib}} with Debian package [deb] The Debian package for {{kib}} can be [downloaded from our website](#install-deb) or from our [APT repository](#deb-repo). It can be used to install {{kib}} on any Debian-based system such as Debian and Ubuntu. -This package contains both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. +:::{include} _snippets/trial.md +::: -The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). 
+:::{include} _snippets/kib-releases.md +::: -## Import the Elastic PGP key [deb-key] +## Step 1: Import the Elastic PGP key [deb-key] :::{include} _snippets/pgp-key.md ::: @@ -26,19 +32,32 @@ The latest stable version of {{kib}} can be found on the [Download Kibana](https wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg ``` -## Install from the APT repository [deb-repo] +## Step 2: Install {{kib}} -You may need to install the `apt-transport-https` package on Debian before proceeding: +You have several options for installing the {{kib}} Debian package: -```sh -sudo apt-get install apt-transport-https -``` +* [From the APT repository](#deb-repo) +* [Manually](#install-deb) -Save the repository definition to `/etc/apt/sources.list.d/elastic-9.x.list`: +### Install from the APT repository [deb-repo] -```sh -echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/8.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-9.x.list -``` +1. You may need to install the `apt-transport-https` package on Debian before proceeding: + + ```sh + sudo apt-get install apt-transport-https + ``` + +2. Save the repository definition to `/etc/apt/sources.list.d/elastic-9.x.list`: + + ```sh + echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/9.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-9.x.list + ``` + +3. Install the {{kib}} Debian package: + + ```sh + sudo apt-get update && sudo apt-get install kibana + ``` :::{warning} Do not use `add-apt-repository` as it will add a `deb-src` entry as well, but we do not provide a source package. 
If you have added the `deb-src` entry, you will see an error like the following: @@ -51,23 +70,17 @@ Unable to find expected entry 'main/source/Sources' in Release file Delete the `deb-src` entry from the `/etc/apt/sources.list` file and the installation should work as expected. ::: -You can install the {{kib}} Debian package with: - -```sh -sudo apt-get update && sudo apt-get install kibana -``` - :::{warning} If two entries exist for the same {{kib}} repository, you will see an error like this during `apt-get update`: ``` -Duplicate sources.list entry https://artifacts.elastic.co/packages/8.x/apt/ ...` +Duplicate sources.list entry https://artifacts.elastic.co/packages/9.x/apt/ ...` ``` -Examine `/etc/apt/sources.list.d/kibana-8.x.list` for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. +Examine `/etc/apt/sources.list.d/kibana-9.x.list` for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. ::: -## Download and install the Debian package manually [install-deb] +### Download and install the Debian package manually [install-deb] The Debian package for {{kib}} {{stack-version}} can be downloaded from the website and installed as follows: ```sh @@ -80,25 +93,17 @@ sudo dpkg -i kibana-{{stack-version}}-amd64.deb % version manually specified in the link above -## Start {{es}} and generate an enrollment token for {{kib}} [deb-enroll] - -When you start {{es}} for the first time, the following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. - -The password and certificate and keys are output to your terminal. 
- -You can then generate an enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool: +## Step 3: Start {{es}} and generate an enrollment token for {{kib}} [deb-enroll] -```sh -bin/elasticsearch-create-enrollment-token -s kibana -``` +[Start {{es}}](/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md). -Start {{kib}} and enter the enrollment token to securely connect {{kib}} with {{es}}. +:::{include} _snippets/auto-security-config.md +::: +:::{include} _snippets/new-enrollment-token.md +::: -## Run {{kib}} with `systemd` [deb-running-systemd] +## Step 4: Run {{kib}} with `systemd` [deb-running-systemd] To configure {{kib}} to start automatically when the system starts, run the following commands: @@ -114,13 +119,17 @@ sudo systemctl start kibana.service sudo systemctl stop kibana.service ``` -These commands provide no feedback as to whether {{kib}} was started successfully or not. Log information can be accessed via `journalctl -u kibana.service`. +These commands provide no feedback as to whether {{kib}} was started successfully or not. Log information can be accessed using `journalctl -u kibana.service`. + +## Step 5: Enroll {{kib}} with {{es}} -## Configure {{kib}} via the config file [deb-configuring] +:::{include} _snippets/enroll-systemd.md +::: -{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). +## Step 6: Configure {{kib}} using the config file [deb-configuring] +{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [](configure.md). 
## Directory layout of Debian package [deb-layout] diff --git a/deploy-manage/deploy/self-managed/install-with-docker.md b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md similarity index 93% rename from deploy-manage/deploy/self-managed/install-with-docker.md rename to deploy-manage/deploy/self-managed/install-kibana-with-docker.md index b7c755a963..9a47e3d422 100644 --- a/deploy-manage/deploy/self-managed/install-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md @@ -2,19 +2,23 @@ navigation_title: "Install with Docker" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/docker.html +navigation_title: "Docker" +applies_to: + deployment: + self: --- -# Install with Docker [docker] +# Install {{kib}} with Docker [docker] Docker images for {{kib}} are available from the Elastic Docker registry. The base image is [ubuntu:20.04](https://hub.docker.com/_/ubuntu). A list of all published Docker images and tags is available at [www.docker.elastic.co](https://www.docker.elastic.co). The source code is in [GitHub](https://github.com/elastic/dockerfiles/tree/master/kibana). -These images contain both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. - +:::{include} _snippets/trial.md +::: ## Run {{kib}} in Docker for development [run-kibana-on-docker-for-dev] @@ -27,16 +34,8 @@ This setup doesn’t run multiple {{es}} nodes by default. To create a multi-nod ## Hardened Docker images [_hardened_docker_images] -You can also use the hardened [Wolfi](https://wolfi.dev/) image for additional security. Using Wolfi images requires Docker version 20.10.10 or higher. - -To use the Wolfi image, append `-wolfi` to the image tag in the Docker command.
- -For example: - -```sh -docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version}} -``` - +:::{include} _snippets/wolfi.md +::: ## Start a single node cluster [_start_a_single_node_cluster] @@ -131,8 +130,6 @@ docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version} docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic ``` - - ### Remove Docker containers [_remove_docker_containers] To remove the containers and their network, run: diff --git a/deploy-manage/deploy/self-managed/install-with-rpm.md b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md similarity index 63% rename from deploy-manage/deploy/self-managed/install-with-rpm.md rename to deploy-manage/deploy/self-managed/install-kibana-with-rpm.md index 5d6b06e622..164ed4a949 100644 --- a/deploy-manage/deploy/self-managed/install-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md @@ -4,12 +4,13 @@ mapped_pages: - https://www.elastic.co/guide/en/kibana/current/rpm.html sub: stack-version: "9.0.0" +navigation_title: "RPM" +applies_to: + deployment: + self: --- - - -# Install with RPM [rpm] - +# Install {{kib}} with RPM [rpm] The RPM for {{kib}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install {{kib}} on any RPM-based system such as OpenSuSE, SLES, Red Hat, and Oracle Enterprise. @@ -17,17 +18,13 @@ The RPM for {{kib}} can be [downloaded from our website](#install-rpm) or from o RPM install is not supported on distributions with old versions of RPM, such as SLES 11. Refer to [Install from archive on Linux or macOS](install-from-archive-on-linux-macos.md) instead. :::: +:::{include} _snippets/trial.md +::: -This package contains both free and subscription features. [Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. 
- -The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). - -::::{tip} -For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](installing-elasticsearch.md). -:::: - +:::{include} _snippets/kib-releases.md +::: -## Import the Elastic PGP key [rpm-key] +## Step 1: Import the Elastic PGP key [rpm-key] :::{include} _snippets/pgp-key.md ::: @@ -37,7 +34,14 @@ rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch ``` -## Installing from the RPM repository [rpm-repo] +## Step 2: Install {{kib}} + +You have the following options for installing the {{kib}} RPM package: + +* [From the RPM repository](#rpm-repo) +* [Manually](#install-rpm) + +### Install from the RPM repository [rpm-repo] Create a file called `kibana.repo` in the `/etc/yum.repos.d/` directory for RedHat based distributions, or in the `/etc/zypp/repos.d/` directory for OpenSuSE based distributions, containing: @@ -65,7 +69,7 @@ sudo zypper install kibana <3> 3. Use zypper on OpenSUSE based distributions -## Download and install the RPM manually [install-rpm] +### Download and install the RPM manually [install-rpm] The RPM for {{kib}} {{stack-version}} can be downloaded from the website and installed as follows: @@ -76,29 +80,20 @@ shasum -a 512 -c kibana-{{stack-version}}-x86_64.rpm.sha512 <1> sudo rpm --install kibana-{{stack-version}}-x86_64.rpm ``` -1. Compares the SHA of the downloaded RPM and the published checksum, which should output `kibana-{version}-x86_64.rpm: OK`. +1. Compares the SHA of the downloaded RPM and the published checksum, which should output `kibana-{{stack-version}}-x86_64.rpm: OK`.
-## Start {{es}} and generate an enrollment token for {{kib}} [rpm-enroll] +## Step 3: Start {{es}} and generate an enrollment token for {{kib}} [rpm-enroll] +[Start {{es}}](/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md). -When you start {{es}} for the first time, the following security configuration occurs automatically: - -* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. -* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. - -The password and certificate and keys are output to your terminal. - -You can then generate an enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool: - -```sh -bin/elasticsearch-create-enrollment-token -s kibana -``` - -Start {{kib}} and enter the enrollment token to securely connect {{kib}} with {{es}}. +:::{include} _snippets/auto-security-config.md +::: +:::{include} _snippets/new-enrollment-token.md +::: -## Run {{kib}} with `systemd` [rpm-running-systemd] +## Step 4: Run {{kib}} with `systemd` [rpm-running-systemd] To configure {{kib}} to start automatically when the system starts, run the following commands: @@ -114,12 +109,17 @@ sudo systemctl start kibana.service sudo systemctl stop kibana.service ``` -These commands provide no feedback as to whether {{kib}} was started successfully or not. Log information can be accessed via `journalctl -u kibana.service`. +These commands provide no feedback as to whether {{kib}} was started successfully or not. Log information can be accessed using `journalctl -u kibana.service`. 
-## Configure {{kib}} via the config file [rpm-configuring] +## Step 5: Enroll {{kib}} with {{es}} + +:::{include} _snippets/enroll-systemd.md +::: + +## Step 6: Configure {{kib}} using the config file [rpm-configuring] -{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). +{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [](configure.md). ## Directory layout of RPM [rpm-layout] diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index a6bc5ebd57..a5efb92613 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -2,25 +2,69 @@ mapped_urls: - https://www.elastic.co/guide/en/kibana/current/setup.html - https://www.elastic.co/guide/en/kibana/current/install.html +applies_to: + deployment: + self: --- -# Install Kibana +# Install {{kib}} -% What needs to be done: Refine +This section includes information on how to setup {{kib}} and get it running, including: -% GitHub issue: https://github.com/elastic/docs-projects/issues/340 -% Scope notes: Good intro / landing page. Merge the content of both, and remove the reference to cloud +* Downloading +* Installing +* Starting +* Configuring +* Upgrading -% Use migrated content from existing pages that map to this page: -% - [ ] ./raw-migrated-files/kibana/kibana/setup.md -% Notes: 5 child docs, all needed -% - [ ] ./raw-migrated-files/kibana/kibana/install.md +## Supported platforms [supported-platforms] -⚠️ **This page is a work in progress.** ⚠️ +Packages of {{kib}} are provided for and tested against Linux, Darwin, and Windows. Since {{kib}} runs on Node.js, we include the necessary Node.js binaries for these platforms. 
Running {{kib}} against a separately maintained version of Node.js is not supported. -The documentation team is working to combine content pulled from the following pages: +To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} for Linux ships with a custom build of Node.js with glibc 2.17 support. For details, see [Custom builds of Node.js](asciidocalypse://docs/kibana/docs/extend/upgrading-nodejs.md#custom-nodejs-builds). -* [/raw-migrated-files/kibana/kibana/setup.md](/raw-migrated-files/kibana/kibana/setup.md) -* [/raw-migrated-files/kibana/kibana/install.md](/raw-migrated-files/kibana/kibana/install.md) \ No newline at end of file +## {{kib}} install packages [install] + +{{kib}} is provided in the following package formats: + +`tar.gz`/`zip` +: The `tar.gz` packages are provided for installation on Linux and Darwin and are the easiest choice for getting started with {{kib}}. + + The `zip` package is the only supported package for Windows. + + [Install from archive on Linux or macOS](/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md) or [Install on Windows](/deploy-manage/deploy/self-managed/install-kibana-on-windows.md) + + +`deb` +: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elastic website or from our Debian repository. + + [Install with Debian package](/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md) + + +`rpm` +: The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elastic website or from our RPM repository. + + [Install with RPM](/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md) + + +`docker` +: Images are available for running {{kib}} as a Docker container. They may be downloaded from the Elastic Docker Registry.
+ + [Running {{kib}} on Docker](/deploy-manage/deploy/self-managed/install-kibana-with-docker.md) + + +::::{important} +If your {{es}} installation is protected by [{{stack-security-features}}](/deploy-manage/security.md), see [Configuring security in {{kib}}](/deploy-manage/security.md) for additional setup instructions. +:::: + +## {{es}} version [elasticsearch-version] + +{{kib}} should be configured to run against an {{es}} node of the same version. This is the officially supported configuration. + +Running different major version releases of {{kib}} and {{es}} (e.g. {{kib}} 9.x and {{es}} 8.x) is not supported, nor is running a minor version of {{kib}} that is newer than the version of {{es}} (e.g. {{kib}} 8.14 and {{es}} 8.13). + +Running a minor version of {{es}} that is higher than {{kib}} will generally work in order to facilitate an upgrade process where {{es}} is upgraded first (e.g. {{kib}} 8.14 and {{es}} 8.15). In this configuration, a warning will be logged on {{kib}} server startup, so it’s only meant to be temporary until {{kib}} is upgraded to the same version as {{es}}. + +Running different patch version releases of {{kib}} and {{es}} (e.g. {{kib}} 9.0.0 and {{es}} 9.0.1) is generally supported, though we encourage users to run the same versions of {{kib}} and {{es}} down to the patch version. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-on-windows.md b/deploy-manage/deploy/self-managed/install-on-windows.md deleted file mode 100644 index 69ecf244a0..0000000000 --- a/deploy-manage/deploy/self-managed/install-on-windows.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -navigation_title: "Install on Windows" -mapped_pages: - - https://www.elastic.co/guide/en/kibana/current/windows.html -sub: - stack-version: "9.0.0" ---- - - - -# Install on Windows [windows] - -{{kib}} can be installed on Windows using the `.zip` package. - -This package contains both free and subscription features.
[Start a 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md) to try out all of the features. - -The latest stable version of {{kib}} can be found on the [Download Kibana](https://elastic.co/downloads/kibana) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). - -## Download and install the `.zip` package [install-windows] - -Download the .zip windows archive for {{kib}} {{stack-version}} from [https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-windows-x86_64.zip](https://artifacts.elastic.co/downloads/kibana/kibana-9.0.0-windows-x86_64.zip) - -Unzip it with your favourite unzip tool. This will create a folder called kibana-{{stack-version}}-windows-x86_64, which we will refer to as $KIBANA_HOME. In a terminal window, CD to the `$KIBANA_HOME` directory, for instance: - -```sh -CD c:\kibana-{{stack-version}}-windows-x86_64 -``` - -## Start {{es}} and generate an enrollment token for {{kib}} [windows-enroll] - - -When you start {{es}} for the first time, the following security configuration occurs automatically: - -* [Certificates and keys](installing-elasticsearch.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. -* The TLS configuration settings are written to `elasticsearch.yml`. -* A password is generated for the `elastic` user. -* An enrollment token is generated for {{kib}}. - -You can then start {{kib}} and enter the enrollment token to securely connect {{kib}} with {{es}}. The enrollment token is valid for 30 minutes. - - -## Run {{kib}} from the command line [windows-running] - -{{kib}} can be started from the command line as follows: - -```sh -.\bin\kibana.bat -``` - -By default, {{kib}} runs in the foreground, prints its logs to `STDOUT`, and can be stopped by pressing **Ctrl-C**. 
- -If this is the first time you’re starting {{kib}}, this command generates a unique link in your terminal to enroll your {{kib}} instance with {{es}}. - -1. In your terminal, click the generated link to open {{kib}} in your browser. -2. In your browser, paste the enrollment token that was generated in the terminal when you started {{es}}, and then click the button to connect your {{kib}} instance with {{es}}. -3. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. - -::::{note} -If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. - -:::: - - - -## Configure {{kib}} via the config file [windows-configuring] - -{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [Configuring Kibana](configure.md). - - -## Directory layout of `.zip` archive [windows-layout] - -The `.zip` package is entirely self-contained. All files and directories are, by default, contained within `$KIBANA_HOME` — the directory created when unpacking the archive. - -This is very convenient because you don’t have to create any directories to start using Kibana, and uninstalling {{kib}} is as easy as removing the `$KIBANA_HOME` directory. However, it is advisable to change the default locations of the config and data directories so that you do not delete important data later on. 
- -| Type | Description | Default Location | Setting | -| --- | --- | --- | --- | -| home | {{kib}} home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | -| bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | -| config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | -| | data | `The location of the data files written to disk by {{kib}} and its plugins` | `$KIBANA_HOME\data` | -| | plugins | `Plugin files location. Each plugin will be contained in a subdirectory.` | `$KIBANA_HOME\plugins` | diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index a72b1e77b8..08637845ee 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -2,66 +2,10 @@ mapped_urls: - https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html - https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-stack-security.html - - https://www.elastic.co/guide/en/elastic-stack/current/installing-stack-demo-self.html --- # Install {{es}} [install-elasticsearch] -% What needs to be done: Refine - -% GitHub issue: https://github.com/elastic/docs-projects/issues/340 - -% Scope notes: From the initial doc remove the reference to cloud based and the title self-managed, as this is already self-managed. We have to merge and decide the final content considering the first two documents. The one with "security" in the name doesn't really differ from the first one, as now security is enabled by default, so it's not needed anymore, but it has interesting details that might be needed on the legacy installation docs. The third doc (tutorial style) can be omitted, we leave it here for redirection purposes only. 
- -% Use migrated content from existing pages that map to this page: - -% - [ ] ./raw-migrated-files/elasticsearch/elasticsearch-reference/install-elasticsearch.md -% - [ ] ./raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md -% - [ ] ./raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md - -% Internal links rely on the following IDs being on this page (e.g. as a heading ID, paragraph ID, etc): - -$$$elasticsearch-deployment-options$$$ - -$$$jvm-version$$$ - -$$$stack-security-certificates$$$ - -$$$stack-skip-auto-configuration$$$ - -$$$elasticsearch-docker-images$$$ - -$$$elasticsearch-install-packages$$$ - -$$$install-stack-self-elastic-agent$$$ - -$$$install-stack-self-elasticsearch-config$$$ - -$$$install-stack-self-elasticsearch-first$$$ - -$$$install-stack-self-elasticsearch-second$$$ - -$$$install-stack-self-elasticsearch-start$$$ - -$$$install-stack-self-elasticsearch-third$$$ - -$$$install-stack-self-fleet-server$$$ - -$$$install-stack-self-kibana$$$ - -$$$install-stack-self-next-steps$$$ - -$$$install-stack-self-overview$$$ - -$$$install-stack-self-prereqs$$$ - -$$$install-stack-self-view-data$$$ - -**This page is a work in progress.** The documentation team is working to combine content pulled from the following pages: - -* [/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md) -* [/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md](/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md) - If you want to install and manage {{es}} yourself, you can: * Run {{es}} using a [Linux, MacOS, or Windows install package](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). 
diff --git a/deploy-manage/deploy/self-managed/other-configuration-settings.md b/deploy-manage/deploy/self-managed/other-configuration-settings.md deleted file mode 100644 index d03724905b..0000000000 --- a/deploy-manage/deploy/self-managed/other-configuration-settings.md +++ /dev/null @@ -1,9 +0,0 @@ -# Other configuration settings - -% What needs to be done: Write from scratch - -% GitHub issue: https://github.com/elastic/docs-projects/issues/340 - -% Scope notes: Link to reference documentation? Where are we going to allocate the rest of the config settings? Reference? - -⚠️ **This page is a work in progress.** ⚠️ \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/plugins.md b/deploy-manage/deploy/self-managed/plugins.md index 09dde45fe6..e38e04d421 100644 --- a/deploy-manage/deploy/self-managed/plugins.md +++ b/deploy-manage/deploy/self-managed/plugins.md @@ -1,13 +1,15 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html +applies_to: + deployment: + self: --- -# Plugins [modules-plugins] +# Add plugins [modules-plugins] Plugins are a way to enhance the basic {{es}} functionality in a custom manner. They range from adding custom mapping types, custom analyzers (in a more built in fashion), custom script engines, custom discovery and more. -For information about selecting and installing plugins, see [{{es}} Plugins and Integrations](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/index.md). - -For information about developing your own plugin, see [Help for plugin authors](asciidocalypse://docs/elasticsearch/docs/extend/index.md). +For information about selecting and installing plugins, see [{{es}} plugins](elasticsearch://reference/elasticsearch-plugins/index.md). +For information about developing your own plugin, see [Create {{es}} plugins](elasticsearch://extend/index.md). 
\ No newline at end of file diff --git a/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md b/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md index 6a02f33975..aa99242852 100644 --- a/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md +++ b/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md @@ -52,6 +52,50 @@ You then configure {{kib}} and Beats to communicate with {{es}} using TLS so tha +## Cases when security auto configuration is skipped [stack-skip-auto-configuration] + +When you start {{es}} for the first time, the node startup process tries to automatically configure security for you. The process runs some checks to determine: + +* If this is the first time that the node is starting +* Whether security is already configured +* If the startup process can modify the node configuration + +If any of those checks fail, there’s a good indication that you [manually configured security](../../../deploy-manage/security/manually-configure-security-in-self-managed-cluster.md), or don’t want security to be configured automatically. In these cases, the node starts normally using the existing configuration. + +::::{important} +If you redirect {{es}} output to a file, security autoconfiguration is skipped. Autoconfigured credentials can only be viewed on the terminal the first time you start {{es}}. If you need to redirect output to a file, start {{es}} without redirection the first time and use redirection on all subsequent starts. +:::: + + + +### Existing environment detected [stack-existing-environment-detected] + +If certain directories already exist, there’s a strong indication that the node was started previously. Similarly, if certain files *don’t* exist, or we can’t read or write to specific files or directories, then we’re likely not running as the user who installed {{es}} or an administrator imposed restrictions. 
If any of the following environment checks are true, security isn’t configured automatically. + +The {{es}} `/data` directory exists and isn’t empty +: The existence of this directory is a strong indicator that the node was started previously, and might already be part of a cluster. + +The `elasticsearch.yml` file doesn’t exist (or isn’t readable), or the `elasticsearch.keystore` isn’t readable +: If either of these files aren’t readable, we can’t determine whether {{es}} security features are already enabled. This state can also indicate that the node startup process isn’t running as a user with sufficient privileges to modify the node configuration. + +The {{es}} configuration directory isn’t writable +: This state likely indicates that an administrator made this directory read-only, or that the user who is starting {{es}} is not the user that installed {{es}}. + + +### Existing settings detected [stack-existing-settings-detected] + +The following settings are incompatible with security auto configuration. If any of these settings exist, the node startup process skips configuring security automatically and the node starts normally. 
+ +* [`node.roles`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) is set to a value where the node can’t be elected as `master`, or if the node can’t hold data +* [`xpack.security.autoconfiguration.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#general-security-settings) is set to `false` +* [`xpack.security.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#general-security-settings) has a value set +* Any of the [`xpack.security.transport.ssl.*`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#transport-tls-ssl-settings) or [`xpack.security.http.ssl.*`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#http-tls-ssl-settings) settings have a value set in the `elasticsearch.yml` configuration file or in the `elasticsearch.keystore` +* Any of the `discovery.type`, `discovery.seed_hosts`, or `cluster.initial_master_nodes` [discovery and cluster formation settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md) have a value set + + ::::{note} + Exceptions are when `discovery.type` is set to `single-node`, or when `cluster.initial_master_nodes` exists but contains only the name of the current node. 
+ + :::: diff --git a/deploy-manage/toc.yml b/deploy-manage/toc.yml index 7beaed6e2c..c5f47a1f8b 100644 --- a/deploy-manage/toc.yml +++ b/deploy-manage/toc.yml @@ -355,17 +355,16 @@ toc: - file: deploy/self-managed/configure-elasticsearch.md children: - file: deploy/self-managed/important-settings-configuration.md - - file: deploy/self-managed/other-configuration-settings.md - file: deploy/self-managed/plugins.md - file: deploy/self-managed/install-kibana.md children: - - file: deploy/self-managed/install-from-archive-on-linux-macos.md - - file: deploy/self-managed/install-on-windows.md - - file: deploy/self-managed/install-with-debian-package.md - - file: deploy/self-managed/install-with-rpm.md - - file: deploy/self-managed/install-with-docker.md - - file: deploy/self-managed/configure.md - - file: deploy/self-managed/access.md + - file: deploy/self-managed/install-kibana-from-archive-on-linux-macos.md + - file: deploy/self-managed/install-kibana-on-windows.md + - file: deploy/self-managed/install-kibana-with-debian-package.md + - file: deploy/self-managed/install-kibana-with-rpm.md + - file: deploy/self-managed/install-kibana-with-docker.md + - file: deploy/self-managed/configure-kibana.md + - file: deploy/self-managed/access-kibana.md - file: deploy/self-managed/air-gapped-install.md - file: deploy/self-managed/tools-apis.md - file: deploy/kibana-reporting-configuration.md diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md deleted file mode 100644 index 37e372178b..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/configuring-stack-security.md +++ /dev/null @@ -1,237 +0,0 @@ -# Start the {{stack}} with security enabled automatically [configuring-stack-security] - -When you start {{es}} for the first time, the following security configuration occurs automatically: - -* [Certificates and 
keys](/deploy-manage/security/security-certificates-keys.md#stack-security-certificates) for TLS are generated for the transport and HTTP layers. -* The TLS configuration settings are written to `elasticsearch.yml`. -* A password is generated for the `elastic` user. -* An enrollment token is generated for {{kib}}. - -You can then start {{kib}} and enter the enrollment token, which is valid for 30 minutes. This token automatically applies the security settings from your {{es}} cluster, authenticates to {{es}} with the built-in `kibana` service account, and writes the security configuration to `kibana.yml`. - -::::{note} -There are [some cases](/deploy-manage/security/security-certificates-keys.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. -:::: - - - -## Prerequisites [_prerequisites_12] - -* [Download](https://www.elastic.co/downloads/elasticsearch) and unpack the `elasticsearch` package distribution for your environment. -* [Download](https://www.elastic.co/downloads/kibana) and unpack the `kibana` package distribution for your environment. - - -## Start {{es}} and enroll {{kib}} with security enabled [stack-start-with-security] - -1. From the installation directory, start {{es}}. - - ```shell - bin/elasticsearch - ``` - - The command prints the `elastic` user password and an enrollment token for {{kib}}. - -2. Copy the generated `elastic` password and enrollment token. These credentials are only shown when you start {{es}} for the first time. - - ::::{note} - If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. 
To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. - - :::: - - - We recommend storing the `elastic` password as an environment variable in your shell. Example: - - ```sh - export ELASTIC_PASSWORD="your_password" - ``` - -3. (Optional) Open a new terminal and verify that you can connect to your {{es}} cluster by making an authenticated call. - - ```shell - curl --cacert config/certs/http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 - ``` - -4. From the directory where you installed {{kib}}, start {{kib}}. - - ```shell - bin/kibana - ``` - -5. Enroll {{kib}} using either interactive or detached mode. - - * **Interactive mode** (browser) - - 1. In your terminal, click the generated link to open {{kib}} in your browser. - 2. In your browser, paste the enrollment token that you copied and click the button to connect your {{kib}} instance with {{es}}. - - ::::{note} - {{kib}} won’t enter interactive mode if it detects existing credentials for {{es}} (`elasticsearch.username` and `elasticsearch.password`) or an existing URL for `elasticsearch.hosts`. - - :::: - - * **Detached mode** (non-browser) - - Run the `kibana-setup` tool and pass the generated enrollment token with the `--enrollment-token` parameter. - - ```sh - bin/kibana-setup --enrollment-token - ``` - - - -## Enroll additional nodes in your cluster [stack-enroll-nodes] - -When {{es}} starts for the first time, the security auto-configuration process binds the HTTP layer to `0.0.0.0`, but only binds the transport layer to localhost. This intended behavior ensures that you can start a single-node cluster with security enabled by default without any additional configuration. 
- -Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. - -Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. - -To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. - -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. - - ```sh - bin/elasticsearch-create-enrollment-token -s node - ``` - - Copy the enrollment token, which you’ll use to enroll new nodes with your {{es}} cluster. - -2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. 
- - ```sh - bin/elasticsearch --enrollment-token - ``` - - {{es}} automatically generates certificates and keys in the following directory: - - ```sh - config/certs - ``` - -3. Repeat the previous step for any new nodes that you want to enroll. - - -## Connect clients to {{es}} [_connect_clients_to_es_5] - -When you start {{es}} for the first time, TLS is configured automatically for the HTTP layer. A CA certificate is generated and stored on disk at: - -```sh -/etc/elasticsearch/certs/http_ca.crt -``` - -The hex-encoded SHA-256 fingerprint of this certificate is also output to the terminal. Any clients that connect to {{es}}, such as the [{{es}} Clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html), {{beats}}, standalone {{agent}}s, and {{ls}} must validate that they trust the certificate that {{es}} uses for HTTPS. {{fleet-server}} and {{fleet}}-managed {{agent}}s are automatically configured to trust the CA certificate. Other clients can establish trust by using either the fingerprint of the CA certificate or the CA certificate itself. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate. You can also copy the CA certificate to your machine and configure your client to use it. - - -### Use the CA fingerprint [_use_the_ca_fingerprint_5] - -Copy the fingerprint value that’s output to your terminal when {{es}} starts, and configure your client to use this fingerprint to establish trust when it connects to {{es}}. - -If the auto-configuration process already completed, you can still obtain the fingerprint of the security certificate by running the following command. The path is to the auto-generated CA certificate for the HTTP layer. - -```sh -openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt -``` - -The command returns the security certificate, including the fingerprint. The `issuer` should be `Elasticsearch security auto-configuration HTTP CA`. 
- -```sh -issuer= /CN=Elasticsearch security auto-configuration HTTP CA -SHA256 Fingerprint= -``` - - -### Use the CA certificate [_use_the_ca_certificate_5] - -If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: - -```sh -/etc/elasticsearch/certs/http_ca.crt -``` - -Copy the `http_ca.crt` file to your machine and configure your client to use this certificate to establish trust when it connects to {{es}}. - - -## What’s next? [_whats_next] - -Congratulations! You’ve successfully started the {{stack}} with security enabled. {{es}} and {{kib}} are secured with TLS on the HTTP layer, and internode communication is encrypted. If you want to enable HTTPS for web traffic, you can [encrypt traffic between your browser and {{kib}}](../../../deploy-manage/security/set-up-basic-security-plus-https.md#encrypt-kibana-browser). - - -## Security certificates and keys [stack-security-certificates] - -When you install {{es}}, the following certificates and keys are generated in the {{es}} configuration directory, which are used to connect a {{kib}} instance to your secured {{es}} cluster and to encrypt internode communication. The files are listed here for reference. - -`http_ca.crt` -: The CA certificate that is used to sign the certificates for the HTTP layer of this {{es}} cluster. - -`http.p12` -: Keystore that contains the key and certificate for the HTTP layer for this node. - -`transport.p12` -: Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. - -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../../deploy-manage/security/secure-settings.md). 
To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. - -Use the following command to retrieve the password for `http.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.http.ssl.keystore.secure_password -``` - -Use the following command to retrieve the password for `transport.p12`: - -```sh -bin/elasticsearch-keystore show xpack.security.transport.ssl.keystore.secure_password -``` - -Additionally, when you use the enrollment token to connect {{kib}} to a secured {{es}} cluster, the HTTP layer CA certificate is retrieved from {{es}} and stored in the {{kib}} `/data` directory. This file establishes trust between {{kib}} and the {{es}} Certificate Authority (CA) for the HTTP layer. - - -## Cases when security auto configuration is skipped [stack-skip-auto-configuration] - -When you start {{es}} for the first time, the node startup process tries to automatically configure security for you. The process runs some checks to determine: - -* If this is the first time that the node is starting -* Whether security is already configured -* If the startup process can modify the node configuration - -If any of those checks fail, there’s a good indication that you [manually configured security](../../../deploy-manage/security/manually-configure-security-in-self-managed-cluster.md), or don’t want security to be configured automatically. In these cases, the node starts normally using the existing configuration. - -::::{important} -If you redirect {{es}} output to a file, security autoconfiguration is skipped. Autoconfigured credentials can only be viewed on the terminal the first time you start {{es}}. If you need to redirect output to a file, start {{es}} without redirection the first time and use redirection on all subsequent starts. 
-:::: - - - -### Existing environment detected [stack-existing-environment-detected] - -If certain directories already exist, there’s a strong indication that the node was started previously. Similarly, if certain files *don’t* exist, or we can’t read or write to specific files or directories, then we’re likely not running as the user who installed {{es}} or an administrator imposed restrictions. If any of the following environment checks are true, security isn’t configured automatically. - -The {{es}} `/data` directory exists and isn’t empty -: The existence of this directory is a strong indicator that the node was started previously, and might already be part of a cluster. - -The `elasticsearch.yml` file doesn’t exist (or isn’t readable), or the `elasticsearch.keystore` isn’t readable -: If either of these files aren’t readable, we can’t determine whether {{es}} security features are already enabled. This state can also indicate that the node startup process isn’t running as a user with sufficient privileges to modify the node configuration. - -The {{es}} configuration directory isn’t writable -: This state likely indicates that an administrator made this directory read-only, or that the user who is starting {{es}} is not the user that installed {{es}}. - - -### Existing settings detected [stack-existing-settings-detected] - -The following settings are incompatible with security auto configuration. If any of these settings exist, the node startup process skips configuring security automatically and the node starts normally. 
- -* [`node.roles`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) is set to a value where the node can’t be elected as `master`, or if the node can’t hold data -* [`xpack.security.autoconfiguration.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#general-security-settings) is set to `false` -* [`xpack.security.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#general-security-settings) has a value set -* Any of the [`xpack.security.transport.ssl.*`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#transport-tls-ssl-settings) or [`xpack.security.http.ssl.*`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#http-tls-ssl-settings) settings have a value set in the `elasticsearch.yml` configuration file or in the `elasticsearch.keystore` -* Any of the `discovery.type`, `discovery.seed_hosts`, or `cluster.initial_master_nodes` [discovery and cluster formation settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md) have a value set - - ::::{note} - Exceptions are when `discovery.type` is set to `single-node`, or when `cluster.initial_master_nodes` exists but contains only the name of the current node. - - :::: - - diff --git a/raw-migrated-files/kibana/kibana/install.md b/raw-migrated-files/kibana/kibana/install.md deleted file mode 100644 index 03fd307d96..0000000000 --- a/raw-migrated-files/kibana/kibana/install.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install {{kib}} [install] - - -## {{kib}} on Elastic Cloud [_kib_on_elastic_cloud] - -If you are using Elastic Cloud, you access Kibana with a single click. 
(You can [sign up for a free trial](https://cloud.elastic.co/registration?page=docs&placement=docs-body) and start exploring data in minutes.) - - -## Install {{kib}} yourself [_install_kib_yourself] - -::::{tip} -For a step-by-step example of setting up the {{stack}} on your own premises, try out our tutorial: [Installing a self-managed Elastic Stack](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md). -:::: - - -::::{note} -Starting with version 6.0.0, Kibana only supports 64 bit operating systems. -:::: - - -Kibana is provided in the following package formats: - -`tar.gz`/`zip` -: The `tar.gz` packages are provided for installation on Linux and Darwin and are the easiest choice for getting started with Kibana. - - The `zip` package is the only supported package for Windows. - - [Install from archive on Linux or macOS](../../../deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md) or [Install on Windows](../../../deploy-manage/deploy/self-managed/install-on-windows.md) - - -`deb` -: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elastic website or from our Debian repository. - - [Install with Debian package](../../../deploy-manage/deploy/self-managed/install-with-debian-package.md) - - -`rpm` -: The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elastic website or from our RPM repository. - - [Install with RPM](../../../deploy-manage/deploy/self-managed/install-with-rpm.md) - - -`docker` -: Images are available for running Kibana as a Docker container. They may be downloaded from the Elastic Docker Registry. 
- - [Running Kibana on Docker](../../../deploy-manage/deploy/self-managed/install-with-docker.md) - - -::::{important} -If your Elasticsearch installation is protected by [{{stack-security-features}}](/deploy-manage/security.md) see [Configuring security in {{kib}}](../../../deploy-manage/security.md) for additional setup instructions. -:::: - - - - - - - diff --git a/raw-migrated-files/kibana/kibana/setup.md b/raw-migrated-files/kibana/kibana/setup.md deleted file mode 100644 index 9aba19ba61..0000000000 --- a/raw-migrated-files/kibana/kibana/setup.md +++ /dev/null @@ -1,28 +0,0 @@ -# Set up [setup] - -This section includes information on how to setup Kibana and get it running, including: - -* Downloading -* Installing -* Starting -* Configuring -* Upgrading - - -## Supported platforms [supported-platforms] - -Packages of Kibana are provided for and tested against Linux, Darwin, and Windows. Since Kibana runs on Node.js, we include the necessary Node.js binaries for these platforms. Running Kibana against a separately maintained version of Node.js is not supported. - -To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} for Linux ships with a custom build of Node.js with glibc 2.17 support. For details, see [Custom builds of Node.js](asciidocalypse://docs/kibana/docs/extend/upgrading-nodejs.md#custom-nodejs-builds). - - -## Elasticsearch version [elasticsearch-version] - -Kibana should be configured to run against an Elasticsearch node of the same version. This is the officially supported configuration. - -Running different major version releases of Kibana and Elasticsearch (e.g. Kibana 5.x and Elasticsearch 2.x) is not supported, nor is running a minor version of Kibana that is newer than the version of Elasticsearch (e.g. Kibana 5.1 and Elasticsearch 5.0). - -Running a minor version of Elasticsearch that is higher than Kibana will generally work in order to facilitate an upgrade process where Elasticsearch is upgraded first (e.g. 
Kibana 5.0 and Elasticsearch 5.1). In this configuration, a warning will be logged on Kibana server startup, so it’s only meant to be temporary until Kibana is upgraded to the same version as Elasticsearch. - -Running different patch version releases of Kibana and Elasticsearch (e.g. Kibana 5.0.0 and Elasticsearch 5.0.1) is generally supported, though we encourage users to run the same versions of Kibana and Elasticsearch down to the patch version. - From 90e854a6b70dacdd4c7ee66300d6e678f1d7eb3a Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 6 Mar 2025 23:55:28 -0500 Subject: [PATCH 11/43] air gapped --- .../deploy/self-managed/air-gapped-install.md | 296 +++++++++++++++--- 1 file changed, 256 insertions(+), 40 deletions(-) diff --git a/deploy-manage/deploy/self-managed/air-gapped-install.md b/deploy-manage/deploy/self-managed/air-gapped-install.md index 37a2fb7930..6c11f9fcac 100644 --- a/deploy-manage/deploy/self-managed/air-gapped-install.md +++ b/deploy-manage/deploy/self-managed/air-gapped-install.md @@ -2,85 +2,301 @@ mapped_urls: - https://www.elastic.co/guide/en/elastic-stack/current/air-gapped-install.html - https://www.elastic.co/guide/en/cloud-enterprise/current/ece-install-offline.html +applies_to: + deployment: + self: +sub: + stack-version: "9.0.0" --- # Air gapped install -% What needs to be done: Refine +Some components of the {{stack}} require additional configuration and local dependencies in order to deploy in environments without internet access. This guide gives an overview of this setup scenario and helps bridge together existing documentation for individual parts of the stack. -% GitHub issue: https://github.com/elastic/docs-projects/issues/309 +Refer to the section for each Elastic component for air-gapped installation configuration and dependencies in a self-managed Linux environment. 
-% Use migrated content from existing pages that map to this page: +## {{es}} [air-gapped-elasticsearch] -% - [ ] ./raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md -% - [ ] ./raw-migrated-files/cloud/cloud-enterprise/ece-install-offline.md +Air-gapped install of {{es}} may require additional steps in order to access some of the features. General install and configuration guides are available in the [{{es}} install documentation](/deploy-manage/deploy/self-managed/installing-elasticsearch.md). -% Internal links rely on the following IDs being on this page (e.g. as a heading ID, paragraph ID, etc): +Specifically: -$$$air-gapped-self-managed-linux$$$ +* To be able to use the GeoIP processor, refer to [the GeoIP processor documentation](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/geoip-processor.md#manually-update-geoip-databases) for instructions on downloading and deploying the required databases. +* Refer to [{{ml-cap}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-machine-learning) for instructions on deploying the Elastic Learned Sparse EncodeR (ELSER) natural language processing (NLP) model and other trained {{ml}} models. -$$$air-gapped-elasticsearch$$$ -$$$air-gapped-kibana$$$ +## {{kib}} [air-gapped-kibana] -$$$air-gapped-beats$$$ +Air-gapped install of {{kib}} may require a number of additional services in the local network in order to access some of the features. General install and configuration guides are available in the [{{kib}} install documentation](/deploy-manage/deploy/self-managed/install-kibana.md). -$$$air-gapped-logstash$$$ +Specifically: -$$$air-gapped-elastic-agent$$$ +* To be able to use {{kib}} mapping visualizations, you need to set up and configure the [Elastic Maps Service](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-maps-service). 
+* To be able to use {{kib}} sample data, install or update hundreds of prebuilt alert rules, and explore available data integrations, you need to set up and configure the [{{package-registry}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry). +* To provide detection rule updates for {{endpoint-sec}} agents, you need to set up and configure the [Elastic Endpoint Artifact Repository](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-endpoint-artifact-repository). +* To access the APM integration, you need to set up and configure [Elastic APM](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-apm). +* To install and use the Elastic documentation for {{kib}} AI assistants, you need to set up and configure the [Elastic product documentation for {{kib}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana-product-documentation). -$$$air-gapped-fleet$$$ -$$$air-gapped-elastic-apm$$$ +## {{beats}} [air-gapped-beats] -$$$air-gapped-elastic-maps-service$$$ +Elastic {{beats}} are light-weight data shippers. They do not require any unique setup in the air-gapped scenario. To learn more, refer to the [{{beats}} documentation](asciidocalypse://docs/beats/docs/reference/index.md). -$$$air-gapped-enterprise-search$$$ -$$$air-gapped-elastic-package-registry$$$ +## {{ls}} [air-gapped-logstash] -$$$air-gapped-elastic-artifact-registry$$$ +{{ls}} is a versatile data shipping and processing application. It does not require any unique setup in the air-gapped scenario. To learn more, refer to the [{{ls}} documentation](asciidocalypse://docs/logstash/docs/reference/index.md). 
-$$$air-gapped-elastic-endpoint-artifact-repository$$$ -$$$air-gapped-machine-learning$$$ +## {{agent}} [air-gapped-elastic-agent] -$$$air-gapped-kubernetes-and-openshift$$$ +Air-gapped install of {{agent}} depends on the [{{package-registry}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry) and the [{{artifact-registry}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry) for most use-cases. The agent itself is fairly lightweight and installs dependencies only as required by its configuration. In terms of connections to these dependencies, {{agents}} need to be able to connect to the {{artifact-registry}} directly, but {{package-registry}} connections are handled through [{{kib}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana). -$$$air-gapped-k8s-os-elastic-kubernetes-operator$$$ +Additionally, if the {{agent}} {{elastic-defend}} integration is used, then access to the [Elastic Endpoint Artifact Repository](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-endpoint-artifact-repository) is necessary in order to deploy updates for some of the detection and prevention capabilities. -$$$air-gapped-k8s-os-elastic-package-registry$$$ +To learn more about install and configuration, refer to the [{{agent}} install documentation](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md). Make sure to check the requirements specific to running {{agents}} in an [air-gapped environment](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/air-gapped.md). -$$$air-gapped-k8s-os-elastic-artifact-registry$$$ +To get a better understanding of how to work with {{agent}} configuration settings and policies, refer to [Appendix D - Agent Integration Guide](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-guide). 
-$$$air-gapped-k8s-os-elastic-endpoint-artifact-repository$$$ -$$$air-gapped-k8s-os-ironbank-secure-images$$$ +## {{fleet-server}} [air-gapped-fleet] -$$$air-gapped-ece$$$ +{{fleet-server}} is a required middleware component for any scalable deployment of the {{agent}}. The air-gapped dependencies of {{fleet-server}} are the same as those of the [{{agent}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-agent). -$$$air-gapped-elastic-package-registry-example$$$ +To learn more about installing {{fleet-server}}, refer to the [{{fleet-server}} set up documentation](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/fleet-server.md). -$$$air-gapped-elastic-artifact-registry-example$$$ -$$$air-gapped-epr-kubernetes-example$$$ +## Elastic APM [air-gapped-elastic-apm] -$$$air-gapped-agent-integration-guide$$$ +Air-gapped setup of the APM server is possible in two ways: -$$$air-gapped-agent-integration-terminology$$$ +* By setting up one of the {{agent}} deployments with an APM integration, as described in [Switch a self-installation to the APM integration](/solutions/observability/apps/switch-self-installation-to-apm-integration.md). +* Or, by installing a standalone Elastic APM Server, as described in the [APM configuration documentation](/solutions/observability/apps/configure-apm-server.md). -$$$air-gapped-agent-integration-configure$$$ -$$$air-gapped-agent-integration-configure-kibana$$$ +## {{ems}} [air-gapped-elastic-maps-service] -$$$air-gapped-agent-integration-configure-yml$$$ +Refer to [Connect to {{ems}}](../../../explore-analyze/visualize/maps/maps-connect-to-ems.md) in the {{kib}} documentation to learn how to configure your firewall to connect to {{ems}}, host it locally, or disable it completely. 
-$$$air-gapped-agent-integration-configure-fleet-api$$$
-$$$air-gapped-kibana-product-documentation$$$
+## {{package-registry}} [air-gapped-elastic-package-registry]
+Air-gapped install of the EPR is possible using any OCI-compatible runtime like Podman (a typical choice for RHEL-like Linux systems) or Docker. Links to the official container image and usage guide are available on the [Air-gapped environments](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/air-gapped.md) page in the {{fleet}} and {{agent}} Guide.
-**This page is a work in progress.** The documentation team is working to combine content pulled from the following pages:
+::::{note}
+Besides setting up the EPR service, you also need to [configure {{kib}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana) to use this service. If using TLS with the EPR service, it is also necessary to set up {{kib}} to trust the certificate presented by the EPR.
+::::
-* [/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md](/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md)
-* [/raw-migrated-files/cloud/cloud-enterprise/ece-install-offline.md](/raw-migrated-files/cloud/cloud-enterprise/ece-install-offline.md)
\ No newline at end of file
+### Additional {{package-registry}} examples
+
+:::{dropdown} Script to generate a SystemD service file on a RHEL 8 system
+
+The following script generates a SystemD service file on a RHEL 8 system in order to run EPR with Podman in a production environment.
+ +```sh +#!/usr/bin/env bash + +EPR_BIND_ADDRESS="0.0.0.0" +EPR_BIND_PORT="8443" +EPR_TLS_CERT="/etc/elastic/epr/epr.pem" +EPR_TLS_KEY="/etc/elastic/epr/epr-key.pem" +EPR_IMAGE="docker.elastic.co/package-registry/distribution:{{stack-version}}" + +podman create \ + --name "elastic-epr" \ + -p "$EPR_BIND_ADDRESS:$EPR_BIND_PORT:$EPR_BIND_PORT" \ + -v "$EPR_TLS_CERT:/etc/ssl/epr.crt:ro" \ + -v "$EPR_TLS_KEY:/etc/ssl/epr.key:ro" \ + -e "EPR_ADDRESS=0.0.0.0:$EPR_BIND_PORT" \ + -e "EPR_TLS_CERT=/etc/ssl/epr.crt" \ + -e "EPR_TLS_KEY=/etc/ssl/epr.key" \ + "$EPR_IMAGE" + +## creates service file in the root directory +# podman generate systemd --new --files --name elastic-epr --restart-policy always +``` +::: + +:::{dropdown} SystemD service file launched as a Podman service + +The following is an example of an actual SystemD service file for an EPR, launched as a Podman service. + +```ini +# container-elastic-epr.service +# autogenerated by Podman 4.1.1 +# Wed Oct 19 13:12:33 UTC 2022 + +[Unit] +Description=Podman container-elastic-epr.service +Documentation=man:podman-generate-systemd(1) +Wants=network-online.target +After=network-online.target +RequiresMountsFor=%t/containers + +[Service] +Environment=PODMAN_SYSTEMD_UNIT=%n +Restart=always +TimeoutStopSec=70 +ExecStartPre=/bin/rm -f %t/%n.ctr-id +ExecStart=/usr/bin/podman run \ + --cidfile=%t/%n.ctr-id \ + --cgroups=no-conmon \ + --rm \ + --sdnotify=conmon \ + -d \ + --replace \ + --name elastic-epr \ + -p 0.0.0.0:8443:8443 \ + -v /etc/elastic/epr/epr.pem:/etc/ssl/epr.crt:ro \ + -v /etc/elastic/epr/epr-key.pem:/etc/ssl/epr.key:ro \ + -e EPR_ADDRESS=0.0.0.0:8443 \ + -e EPR_TLS_CERT=/etc/ssl/epr.crt \ + -e EPR_TLS_KEY=/etc/ssl/epr.key docker.elastic.co/package-registry/distribution:{{stack-version}} +ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id +ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id +Type=notify +NotifyAccess=all + +[Install] +WantedBy=default.target +``` +::: + +## 
{{artifact-registry}} [air-gapped-elastic-artifact-registry]
+
+Air-gapped install of the {{artifact-registry}} is necessary in order to enable {{agent}} deployments to perform self-upgrades and install certain components which are needed for some of the data integrations (that is, in addition to what is also retrieved from the EPR). To learn more, refer to [Host your own artifact registry for binary downloads](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/air-gapped.md#host-artifact-registry) in the {{fleet}} and {{elastic-agent}} Guide.
+
+::::{note}
+When setting up your own web server, such as NGINX, to function as the {{artifact-registry}}, it is recommended not to use TLS as there are, currently, no direct ways to establish certificate trust between {{agents}} and this service.
+::::
+
+### Additional {{artifact-registry}} examples
+
+:::{dropdown} Artifact download script
+
+The following example script downloads artifacts from the internet to be later served as a private {{artifact-registry}}.
+ +```sh +#!/usr/bin/env bash +set -o nounset -o errexit -o pipefail + +STACK_VERSION={{stack-version}} +ARTIFACT_DOWNLOADS_BASE_URL=https://artifacts.elastic.co/downloads + +DOWNLOAD_BASE_DIR=${DOWNLOAD_BASE_DIR:?"Make sure to set DOWNLOAD_BASE_DIR when running this script"} + +COMMON_PACKAGE_PREFIXES="apm-server/apm-server beats/auditbeat/auditbeat beats/elastic-agent/elastic-agent beats/filebeat/filebeat beats/heartbeat/heartbeat beats/metricbeat/metricbeat beats/osquerybeat/osquerybeat beats/packetbeat/packetbeat cloudbeat/cloudbeat endpoint-dev/endpoint-security fleet-server/fleet-server" + +WIN_ONLY_PACKAGE_PREFIXES="beats/winlogbeat/winlogbeat" + +RPM_PACKAGES="beats/elastic-agent/elastic-agent" +DEB_PACKAGES="beats/elastic-agent/elastic-agent" + +function download_packages() { + local url_suffix="$1" + local package_prefixes="$2" + + local _url_suffixes="$url_suffix ${url_suffix}.sha512 ${url_suffix}.asc" + local _pkg_dir="" + local _dl_url="" + + for _download_prefix in $package_prefixes; do + for _pkg_url_suffix in $_url_suffixes; do + _pkg_dir=$(dirname ${DOWNLOAD_BASE_DIR}/${_download_prefix}) + _dl_url="${ARTIFACT_DOWNLOADS_BASE_URL}/${_download_prefix}-${_pkg_url_suffix}" + (mkdir -p $_pkg_dir && cd $_pkg_dir && curl -O "$_dl_url") + done + done +} + +# and we download +for _os in linux windows; do + case "$_os" in + linux) + PKG_URL_SUFFIX="${STACK_VERSION}-${_os}-x86_64.tar.gz" + ;; + windows) + PKG_URL_SUFFIX="${STACK_VERSION}-${_os}-x86_64.zip" + ;; + *) + echo "[ERROR] Something happened" + exit 1 + ;; + esac + + download_packages "$PKG_URL_SUFFIX" "$COMMON_PACKAGE_PREFIXES" + + if [[ "$_os" = "windows" ]]; then + download_packages "$PKG_URL_SUFFIX" "$WIN_ONLY_PACKAGE_PREFIXES" + fi + + if [[ "$_os" = "linux" ]]; then + download_packages "${STACK_VERSION}-x86_64.rpm" "$RPM_PACKAGES" + download_packages "${STACK_VERSION}-amd64.deb" "$DEB_PACKAGES" + fi +done + + +## selinux tweaks +# semanage fcontext -a -t "httpd_sys_content_t" 
'/opt/elastic-packages(/.*)?' +# restorecon -Rv /opt/elastic-packages +``` +::: + +:::{dropdown} NGINX config for private {{artifact-registry}} web server +The following is an example NGINX configuration for running a web server for the {{artifact-registry}}. + +```sh +user nginx; +worker_processes 2; + +error_log /var/log/nginx/error.log notice; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + sendfile on; + keepalive_timeout 65; + + server { + listen 9080 default_server; + server_name _; + root /opt/elastic-packages; + + location / { + + } + } + +} +``` +::: + + +## Elastic Endpoint Artifact Repository [air-gapped-elastic-endpoint-artifact-repository] + +Air-gapped setup of this component is, essentially, identical to the setup of the [{{artifact-registry}}](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry) except that different artifacts are served. To learn more, refer to [Configure offline endpoints and air-gapped environments](../../../solutions/security/configure-elastic-defend/configure-offline-endpoints-air-gapped-environments.md) in the Elastic Security guide. + + +## {{ml-cap}} [air-gapped-machine-learning] + +Some {{ml}} features, like natural language processing (NLP), require you to deploy trained models. To learn about deploying {{ml}} models in an air-gapped environment, refer to: + +* [Deploy ELSER in an air-gapped environment](../../../explore-analyze/machine-learning/nlp/ml-nlp-elser.md#air-gapped-install). +* [Install trained models in an air-gapped environment with Eland](asciidocalypse://docs/eland/docs/reference/machine-learning.md#ml-nlp-pytorch-air-gapped). 
+ + +## {{kib}} Product documentation for AI Assistants [air-gapped-kibana-product-documentation] + +Detailed install and configuration instructions are available in the [{{kib}} AI Assistants settings documentation](asciidocalypse://docs/kibana/docs/reference/configuration-reference/ai-assistant-settings.md). \ No newline at end of file From a36e79e97c1f3f636c5d0f15cea545992a9287b1 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Fri, 7 Mar 2025 10:50:33 -0500 Subject: [PATCH 12/43] stuff --- deploy-manage/deploy.md | 1 + deploy-manage/deploy/self-managed.md | 4 ++-- deploy-manage/deploy/self-managed/deploy-cluster.md | 9 +++++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/deploy-manage/deploy.md b/deploy-manage/deploy.md index 52702cced3..9502cf4aec 100644 --- a/deploy-manage/deploy.md +++ b/deploy-manage/deploy.md @@ -24,6 +24,7 @@ This page will help you understand your deployment options and choose the approa ## Core components Every Elastic deployment requires {{es}} as its core data store and search/analytics engine. + Additionally, {{kib}} provides the user interface for all Elastic solutions and Serverless projects. It is required for most use cases, from data exploration to monitoring and security analysis. Your choice of deployment type determines how you'll set up and manage these core components, plus any additional components you need. diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index fae873d0c7..a0160e91d8 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -5,6 +5,6 @@ sub: stack-version: "9.0.0" --- -# Self-managed cluster [dependencies-versions] +# Self-managed cluster + -See [Elastic Stack third-party dependencies](https://artifacts.elastic.co/reports/dependencies/dependencies-current.md) for the complete list of dependencies for {{es}}. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/deploy-cluster.md b/deploy-manage/deploy/self-managed/deploy-cluster.md index 1e1a2bb1b2..70953e689f 100644 --- a/deploy-manage/deploy/self-managed/deploy-cluster.md +++ b/deploy-manage/deploy/self-managed/deploy-cluster.md @@ -5,7 +5,7 @@ mapped_urls: - https://www.elastic.co/guide/en/elastic-stack/current/installing-elastic-stack.html --- -# Deploy the cluster +# Deploy an {{es}} cluster % What needs to be done: Refine @@ -28,4 +28,9 @@ $$$dedicated-host$$$ * [/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md) % Doesn't exist -% * [/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md](/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md) \ No newline at end of file +% * [/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md](/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md) + + +## Third-party dependencies [dependencies-versions] + +See [Elastic Stack third-party dependencies](https://artifacts.elastic.co/reports/dependencies/dependencies-current.html) for the complete list of dependencies for {{es}}. 
\ No newline at end of file From 4a5b52d33feee876328347090f10090f4e13ed37 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Fri, 7 Mar 2025 11:16:35 -0500 Subject: [PATCH 13/43] conflicts --- .../deploy/self-managed/_snippets/enroll-nodes.md | 2 +- .../deploy/self-managed/_snippets/enroll-steps.md | 2 +- .../self-managed/_snippets/new-enrollment-token.md | 2 +- .../deploy/self-managed/_snippets/security-files.md | 2 +- deploy-manage/deploy/self-managed/_snippets/trial.md | 2 +- deploy-manage/deploy/self-managed/bootstrap-checks.md | 10 +++++----- .../install-elasticsearch-docker-configure.md | 2 +- .../self-managed/install-elasticsearch-docker-prod.md | 4 ++-- deploy-manage/deploy/self-managed/install-kibana.md | 2 +- .../deploy/self-managed/installing-elasticsearch.md | 2 +- 10 files changed, 15 insertions(+), 15 deletions(-) diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md index 272238e259..aa453065ac 100644 --- a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md @@ -6,7 +6,7 @@ Additionally, only nodes on the same host can join the cluster without additiona To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. +1. 
In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](elasticsearch://reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. ```sh bin{{slash}}elasticsearch-create-enrollment-token -s node diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-steps.md b/deploy-manage/deploy/self-managed/_snippets/enroll-steps.md index 12eee83897..0f3615db25 100644 --- a/deploy-manage/deploy/self-managed/_snippets/enroll-steps.md +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-steps.md @@ -5,7 +5,7 @@ If this is the first time you’re starting {{kib}}, this command generates a un 3. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. ::::{note} -If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. +If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](elasticsearch://reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](elasticsearch://reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. 
:::: :::{tip} diff --git a/deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md b/deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md index 982a0f2199..40a0b5a884 100644 --- a/deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md +++ b/deploy-manage/deploy/self-managed/_snippets/new-enrollment-token.md @@ -1,4 +1,4 @@ -If your enrollment token has expired, then you can generate a new enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool: +If your enrollment token has expired, then you can generate a new enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](elasticsearch://reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool: ```sh bin/elasticsearch-create-enrollment-token -s kibana diff --git a/deploy-manage/deploy/self-managed/_snippets/security-files.md b/deploy-manage/deploy/self-managed/_snippets/security-files.md index 33738d807c..b38a487f45 100644 --- a/deploy-manage/deploy/self-managed/_snippets/security-files.md +++ b/deploy-manage/deploy/self-managed/_snippets/security-files.md @@ -9,7 +9,7 @@ When you install {{es}}, the following certificates and keys are generated in th `transport.p12` : Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](/deploy-manage/security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. +`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. 
{{es}} stores the passwords for these keystores as [secure settings](/deploy-manage/security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](elasticsearch://reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. Use the following command to retrieve the password for `http.p12`: diff --git a/deploy-manage/deploy/self-managed/_snippets/trial.md b/deploy-manage/deploy/self-managed/_snippets/trial.md index eecd7bb663..f62c5f1d25 100644 --- a/deploy-manage/deploy/self-managed/_snippets/trial.md +++ b/deploy-manage/deploy/self-managed/_snippets/trial.md @@ -1 +1 @@ -This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. \ No newline at end of file +This package contains both free and subscription features. [Start a 30-day trial](elasticsearch://reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks.md b/deploy-manage/deploy/self-managed/bootstrap-checks.md index 8401249a51..0be7fcd772 100644 --- a/deploy-manage/deploy/self-managed/bootstrap-checks.md +++ b/deploy-manage/deploy/self-managed/bootstrap-checks.md @@ -31,9 +31,9 @@ There are some bootstrap checks that are always enforced to prevent {{es}} from ## Development vs. production mode [dev-vs-prod-mode] -By default, {{es}} binds to loopback addresses for [HTTP and transport (internal) communication](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md). This is fine for downloading and playing with {{es}} as well as everyday development, but it’s useless for production systems. 
To join a cluster, an {{es}} node must be reachable via transport communication. To join a cluster via a non-loopback address, a node must bind transport to a non-loopback address and not be using [single-node discovery](/deploy-manage/deploy/self-managed/bootstrap-checks.md#single-node-discovery). Thus, we consider an {{es}} node to be in development mode if it can not form a cluster with another machine via a non-loopback address, and is otherwise in production mode if it can join a cluster via non-loopback addresses. +By default, {{es}} binds to loopback addresses for [HTTP and transport (internal) communication](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md). This is fine for downloading and playing with {{es}} as well as everyday development, but it’s useless for production systems. To join a cluster, an {{es}} node must be reachable via transport communication. To join a cluster via a non-loopback address, a node must bind transport to a non-loopback address and not be using [single-node discovery](/deploy-manage/deploy/self-managed/bootstrap-checks.md#single-node-discovery). Thus, we consider an {{es}} node to be in development mode if it can not form a cluster with another machine via a non-loopback address, and is otherwise in production mode if it can join a cluster via non-loopback addresses. -Note that HTTP and transport can be configured independently via [`http.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings) and [`transport.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings). This can be useful for configuring a single node to be reachable via HTTP for testing purposes without triggering production mode. 
+Note that HTTP and transport can be configured independently via [`http.host`](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#http-settings) and [`transport.host`](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings). This can be useful for configuring a single node to be reachable via HTTP for testing purposes without triggering production mode. ## Single-node discovery [single-node-discovery] @@ -43,13 +43,13 @@ Some users need to bind the transport to an external interface for testing a rem ## Forcing the bootstrap checks [_forcing_the_bootstrap_checks] -If you are running a single node in production, it is possible to evade the bootstrap checks, either by not binding transport to an external interface, or by binding transport to an external interface and setting the discovery type to `single-node`. For this situation, you can force execution of the bootstrap checks by setting the system property `es.enforce.bootstrap.checks` to `true` in the [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options). We strongly encourage you to do this if you are in this specific situation. This system property can be used to force execution of the bootstrap checks independent of the node configuration. +If you are running a single node in production, it is possible to evade the bootstrap checks, either by not binding transport to an external interface, or by binding transport to an external interface and setting the discovery type to `single-node`. For this situation, you can force execution of the bootstrap checks by setting the system property `es.enforce.bootstrap.checks` to `true` in the [JVM options](elasticsearch://reference/elasticsearch/jvm-settings.md#set-jvm-options). We strongly encourage you to do this if you are in this specific situation. 
This system property can be used to force execution of the bootstrap checks independent of the node configuration. ## Checks :::{dropdown} Heap size check -By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. If you manually override the default sizing and start the JVM with different initial and max heap sizes, the JVM may pause as it resizes the heap during system usage. If you enable [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock), the JVM locks the initial heap size on startup. If the initial heap size is not equal to the maximum heap size, some JVM heap may not be locked after a resize. +By default, {{es}} automatically sizes JVM heap based on a node’s [roles](elasticsearch://reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. If you manually override the default sizing and start the JVM with different initial and max heap sizes, the JVM may pause as it resizes the heap during system usage. If you enable [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock), the JVM locks the initial heap size on startup. If the initial heap size is not equal to the maximum heap size, some JVM heap may not be locked after a resize. To avoid these issues, start the JVM with an initial heap size equal to the maximum heap size. ::: @@ -225,7 +225,7 @@ $$$bootstrap-checks-xpack-token-ssl$$$ If you use {{es}} {{security-features}} and the built-in token service is enabled, you must configure your cluster to use SSL/TLS for the HTTP interface. HTTPS is required in order to use the token service. -In particular, if `xpack.security.authc.token.enabled` is set to `true` in the `elasticsearch.yml` file, you must also set `xpack.security.http.ssl.enabled` to `true`. 
For more information about these settings, see [Security settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md) and [Advanced HTTP settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings). +In particular, if `xpack.security.authc.token.enabled` is set to `true` in the `elasticsearch.yml` file, you must also set `xpack.security.http.ssl.enabled` to `true`. For more information about these settings, see [Security settings](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md) and [Advanced HTTP settings](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#http-settings). To pass this bootstrap check, you must enable HTTPS or disable the built-in token service. diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md index 69bd81ee21..543f8dc40e 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md @@ -112,7 +112,7 @@ Some plugins require additional security permissions. You must explicitly accept * Attaching a `tty` when you run the Docker image and allowing the permissions when prompted. * Inspecting the security permissions and accepting them (if appropriate) by adding the `--batch` flag to the plugin install command. -See [Plugin management](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/_other_command_line_parameters.md) for more information. +See [Plugin management](elasticsearch://reference/elasticsearch-plugins/_other_command_line_parameters.md) for more information. 
## Troubleshoot Docker errors for {{es}} [troubleshoot-docker-errors] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md index 2d7a29c7ad..383da82141 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md @@ -162,9 +162,9 @@ The image [exposes](https://docs.docker.com/engine/reference/builder/#/expose) T ## Manually set the heap size [docker-set-heap-size] -By default, {{es}} automatically sizes JVM heap based on a nodes’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and the total memory available to the node’s container. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting JVM heap size. +By default, {{es}} automatically sizes JVM heap based on a nodes’s [roles](elasticsearch://reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and the total memory available to the node’s container. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting JVM heap size. -To manually set the heap size in production, bind mount a [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options) file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired [heap size](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-heap-size) settings. 
+To manually set the heap size in production, bind mount a [JVM options](elasticsearch://reference/elasticsearch/jvm-settings.md#set-jvm-options) file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired [heap size](elasticsearch://reference/elasticsearch/jvm-settings.md#set-jvm-heap-size) settings. For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` environment variable. For example, to use 1GB, use the following command. diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index a5efb92613..0f1fc7bbd2 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -23,7 +23,7 @@ This section includes information on how to setup {{kib}} and get it running, in Packages of {{kib}} are provided for and tested against Linux, Darwin, and Windows. Since {{kib}} runs on Node.js, we include the necessary Node.js binaries for these platforms. Running {{kib}} against a separately maintained version of Node.js is not supported. -To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} for Linux ships with a custom build of Node.js with glibc 2.17 support. For details, see [Custom builds of Node.js](asciidocalypse://docs/kibana/docs/extend/upgrading-nodejs.md#custom-nodejs-builds). +To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} for Linux ships with a custom build of Node.js with glibc 2.17 support. For details, see [Custom builds of Node.js](kibana://extend/upgrading-nodejs.md#custom-nodejs-builds). 
## {{kib}} install packages [install] diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index 08637845ee..4034dce077 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -59,7 +59,7 @@ The bundled JVM is treated the same as any other dependency of {{es}} in terms o :::: -If you decide to run {{es}} using a version of Java that is different from the bundled one, prefer to use the latest release of a [LTS version of Java](https://www.oracle.com/technetwork/java/eol-135779.md) which is [listed in the support matrix](https://elastic.co/support/matrix). Although such a configuration is supported, if you encounter a security issue or other bug in your chosen JVM then Elastic may not be able to help unless the issue is also present in the bundled JVM. Instead, you must seek assistance directly from the supplier of your chosen JVM. You must also take responsibility for reacting to security and bug announcements from the supplier of your chosen JVM. {{es}} may not perform optimally if using a JVM other than the bundled one. {{es}} is closely coupled to certain OpenJDK-specific features, so it may not work correctly with JVMs that are not OpenJDK. {{es}} will refuse to start if you attempt to use a known-bad JVM version. +If you decide to run {{es}} using a version of Java that is different from the bundled one, prefer to use the latest release of a [LTS version of Java](https://www.oracle.com/technetwork/java/eol-135779.html) which is [listed in the support matrix](https://elastic.co/support/matrix). Although such a configuration is supported, if you encounter a security issue or other bug in your chosen JVM then Elastic may not be able to help unless the issue is also present in the bundled JVM. Instead, you must seek assistance directly from the supplier of your chosen JVM. 
You must also take responsibility for reacting to security and bug announcements from the supplier of your chosen JVM. {{es}} may not perform optimally if using a JVM other than the bundled one. {{es}} is closely coupled to certain OpenJDK-specific features, so it may not work correctly with JVMs that are not OpenJDK. {{es}} will refuse to start if you attempt to use a known-bad JVM version. To use your own version of Java, set the `ES_JAVA_HOME` environment variable to the path to your own JVM installation. The bundled JVM is located within the `jdk` subdirectory of the {{es}} home directory. You may remove this directory if using your own JVM. From aba29a1284a4218308d1298d6785e95048554fb0 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Fri, 7 Mar 2025 14:03:17 -0500 Subject: [PATCH 14/43] overview --- deploy-manage/deploy/deployment-comparison.md | 2 +- deploy-manage/deploy/self-managed.md | 99 +++++++++++++++++++ .../deploy/self-managed/access-kibana.md | 2 +- .../deploy/self-managed/tools-apis.md | 8 +- 4 files changed, 108 insertions(+), 3 deletions(-) diff --git a/deploy-manage/deploy/deployment-comparison.md b/deploy-manage/deploy/deployment-comparison.md index 4c71a81591..0feb76594a 100644 --- a/deploy-manage/deploy/deployment-comparison.md +++ b/deploy-manage/deploy/deployment-comparison.md @@ -1,5 +1,5 @@ -# Compare deployment options +# Detailed deployment comparison This reference provides detailed comparisons of features and capabilities across Elastic's deployment options: self-managed deployments, Elastic Cloud Hosted, and Serverless. For a high-level overview of deployment types and guidance on choosing between them, see the [overview](../deploy.md). 
diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md
index a0160e91d8..27e81507f2 100644
--- a/deploy-manage/deploy/self-managed.md
+++ b/deploy-manage/deploy/self-managed.md
@@ -7,4 +7,103 @@ sub:
 
 # Self-managed cluster
 
+If you want to install Elastic on your own premises without the assistance of an [orchestrator](/deploy-manage/deploy.md#about-orchestration), then you can deploy a self-managed cluster. If you deploy a self-managed cluster, then you have complete control and responsibility over every aspect of your Elastic deployment.
+
+To quickly set up {{es}} and {{kib}} in Docker for local development or testing, jump to [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md).
+
+:::{admonition} Simplify the deployment process
+Self-managed clusters are useful for local development, and for exploring Elastic features. However, Elastic offers several deployment options that can simplify the process of deploying and managing multi-node deployments, especially in production. They also allow you to deploy and manage multiple deployments from a single surface.
+
+Managed by Elastic:
+* [{{serverless-full}}](/deploy-manage/deploy/elastic-cloud/serverless.md)
+* [{{ech}}](/deploy-manage/deploy/elastic-cloud/cloud-hosted.md)
+
+Self-hosted options:
+* [{{eck}}](/deploy-manage/deploy/cloud-on-k8s.md)
+* [{{ece}}](/deploy-manage/deploy/cloud-enterprise.md)
+
+For a comparison of these deployment options, refer to [Choosing your deployment type](/deploy-manage/deploy.md#choosing-your-deployment-type) and [](/deploy-manage/deploy/deployment-comparison.md).
+:::
+
+## Section overview
+
+This section focuses on deploying {{es}} and {{kib}} without an orchestrator.
+
+Depending on your use case, you might need to deploy other components, such as APM, Fleet, or Logstash. Deploying those components is not covered in this section. [Learn more about optional components](/get-started/the-stack.md). 
+ +This section covers the following tasks: + +### Deploying Elasticsearch + +Learn how to install and configure {{es}}. {{es}} is the distributed search and analytics engine, scalable data store, and vector database at the heart of all Elastic solutions. + +* [](/deploy-manage/deploy/self-managed/deploy-cluster.md) + * [](/deploy-manage/deploy/self-managed/important-system-configuration.md): Prepare your environment for an {{es}} installation. + * [](/deploy-manage/deploy/self-managed/installing-elasticsearch.md): Install and run {{es}} using one of our install packages or container images. + * [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md): Quickly set up {{es}} and {{kib}} in Docker for local development or testing. +* [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md): Learn how to make configuration changes to {{es}} + * [](/deploy-manage/deploy/self-managed/important-settings-configuration.md): Learn about key settings required for production environments. + * [](/deploy-manage/deploy/self-managed/plugins.md): Learn about how to extend {{es}} functionality with plugins. + + :::{note} + For a complete list of settings that you can apply to your {{es}} cluster, refer to the [Elasticsearch configuration reference](elasticsearch://reference/elasticsearch/configuration-reference.md). + ::: + +### Deploying Kibana + +After you deploy {{es}}, you can install {{kib}}. {{kib}} provides the user interface for all Elastic solutions. It’s a powerful tool for visualizing and analyzing your data, and for managing and monitoring the {{stack}}. Although {{kib}} is not required to use {{es}}, it's required for most use cases. + +* [](/deploy-manage/deploy/self-managed/install-kibana.md): Install {{kib}} using one of our install packages or container images, and enroll {{kib}} with your {{es}} cluster. +* [](/deploy-manage/deploy/self-managed/configure-kibana.md): Learn how to make configuration changes to {{kib}}. 
+* [](/deploy-manage/deploy/self-managed/access-kibana.md): Learn how to access {{kib}} using a web browser. + +### Installing in air gapped environments + +Some components of the {{stack}} require additional configuration and local dependencies in order to deploy in environments without internet access. + +Refer to [](/deploy-manage/deploy/self-managed/air-gapped-install.md) to learn how to install {{es}}, {{kib}}, and optional components in an environment without internet access. + +### Tools and APIs + +Review a list of all of the resources that you can use to interact with your self-managed cluster, including tools, APIs, client libraries, and more. + +[](/deploy-manage/deploy/self-managed/tools-apis.md). + +## Other important sections + +Review these other sections for critical information about securing and managing your self-managed cluster. + +### Secure and control access + +Learn how to secure your Elastic environment to restrict access to only authorized parties, and allow communication between your environment and external parties. + +* [](/deploy-manage/security.md): Learn about security features that prevent bad actors from tampering with your data, and encrypt communications to, from, and within your cluster. +* [](/deploy-manage/users-roles.md): Set up authentication and authorization for your cluster, and learn about the underlying security technologies that {{es}} uses to authenticate and authorize requests internally and across services. +* [](/deploy-manage/manage-spaces.md): Learn how to organize content in {{kib}}, and restrict access to this content to specific users. +* [](/deploy-manage/api-keys.md): Authenticate and authorize programmatic access to your deployments and {{es}} resources. +* [](/deploy-manage/manage-connectors.md): Manage connection information between Elastic and third-party systems. 
+* [](/deploy-manage/remote-clusters.md): Enable communication between {{es}} clusters to support [cross-cluster replication](/deploy-manage/tools/cross-cluster-replication.md) and [cross-cluster search](/solutions/search/cross-cluster-search.md). + +### Administer and maintain + +Monitor the performance of your Elastic environment, administer your license, set up backup and resilience tools, and maintain the health of your environment. + +* [](/deploy-manage/tools.md): Learn about the tools available to safeguard data, ensure continuous availability, and maintain resilience in your {{es}} environment. +* [](/deploy-manage/monitor.md): View health and performance data for Elastic components, and receive recommendations and insights. +* [](/deploy-manage/license.md): Learn how to manage your Elastic license. +* [](/deploy-manage/maintenance.md): Learn how to isolate or deactivate parts of your Elastic environment to perform maintenance, or restart parts of Elastic. + +### Upgrade + +You can [upgrade your Elastic environment](/deploy-manage/upgrade.md) to gain access to the latest features. Learn how to upgrade your cluster or deployment to the latest {{stack}} version, or upgrade your {{ece}} orchestrator or {{eck}} operator to the latest version. + +### Design guidance + +Learn how to design a production-ready Elastic environment. + +* [](/deploy-manage/production-guidance.md): Review tips and guidance that you can use to design a production environment that matches your workloads, policies, and deployment needs. +* [](/deploy-manage/reference-architectures.md): Explore blueprints for deploying clusters tailored to different use cases. + +### Architectural information + +In the [](/deploy-manage/distributed-architecture.md) section, learn about the architecture of {{es}} and {{kib}}, and how Elastic stores and retrieves data and executes tasks in clusters with multiple nodes. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/access-kibana.md b/deploy-manage/deploy/self-managed/access-kibana.md index dbe5b4a598..8e4adb882e 100644 --- a/deploy-manage/deploy/self-managed/access-kibana.md +++ b/deploy-manage/deploy/self-managed/access-kibana.md @@ -9,7 +9,7 @@ Access {{kib}} through the web application on port 5601. 1. Point your web browser to the machine where you are running {{kib}} and specify the port number. For example, `localhost:5601` or `http://YOURDOMAIN.com:5601`. - To remotely connect to {{kib}}, set [server.host](configure.md#server-host) to a non-loopback address. + To remotely connect to {{kib}}, set [`server.host`](kibana://reference/configuration-reference/general-settings.md#server-host) to a non-loopback address. 2. Log on to your account. 3. Go to the home page, then click **{{kib}}**. diff --git a/deploy-manage/deploy/self-managed/tools-apis.md b/deploy-manage/deploy/self-managed/tools-apis.md index dc9b34e50b..0c982406ac 100644 --- a/deploy-manage/deploy/self-managed/tools-apis.md +++ b/deploy-manage/deploy/self-managed/tools-apis.md @@ -1,4 +1,10 @@ -# Tools and APIs +--- +navigation_title: "Tools and APIs" +applies_to: + deployment: + self: +--- +# Tools and APIs for self-managed clusters % What needs to be done: Write from scratch From 4fe35e93f3700ddea0ffd87255fa037ff4466130 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Fri, 7 Mar 2025 16:10:45 -0500 Subject: [PATCH 15/43] getting closer --- .../deploy/_snippets/installation-order.md | 10 ++ .../_snippets/stack-version-compatibility.md | 3 + .../cloud-enterprise/configure-host-ubuntu.md | 2 +- deploy-manage/deploy/self-managed.md | 3 + .../self-managed/_snippets/dedicated-hosts.md | 1 + .../self-managed/_snippets/enroll-nodes.md | 4 +- .../deploy/self-managed/access-kibana.md | 8 ++ .../deploy/self-managed/configure-kibana.md | 8 +- .../deploy/self-managed/deploy-cluster.md | 100 +++++++++++++++--- 
.../important-settings-configuration.md | 4 +- .../important-system-configuration.md | 17 ++- ...asticsearch-from-archive-on-linux-macos.md | 10 +- ...stall-elasticsearch-with-debian-package.md | 10 +- .../install-elasticsearch-with-docker.md | 2 +- .../install-elasticsearch-with-rpm.md | 10 +- ...stall-elasticsearch-with-zip-on-windows.md | 10 +- .../deploy/self-managed/install-kibana.md | 13 ++- .../self-managed/installing-elasticsearch.md | 69 ------------ ...cal-development-installation-quickstart.md | 5 +- .../self-managed/setting-system-settings.md | 13 ++- .../add-and-remove-elasticsearch-nodes.md | 33 +----- deploy-manage/toc.yml | 27 +++-- get-started/deployment-options.md | 5 + get-started/the-stack.md | 19 ++++ .../elasticsearch-reference/setup.md | 18 ---- .../elastic-stack/air-gapped-install.md | 2 +- .../fleet/host-proxy-env-vars.md | 2 +- .../observability/apps/apm-server-systemd.md | 2 +- 28 files changed, 220 insertions(+), 190 deletions(-) create mode 100644 deploy-manage/deploy/_snippets/installation-order.md create mode 100644 deploy-manage/deploy/_snippets/stack-version-compatibility.md create mode 100644 deploy-manage/deploy/self-managed/_snippets/dedicated-hosts.md delete mode 100644 deploy-manage/deploy/self-managed/installing-elasticsearch.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md diff --git a/deploy-manage/deploy/_snippets/installation-order.md b/deploy-manage/deploy/_snippets/installation-order.md new file mode 100644 index 0000000000..5c456ea67f --- /dev/null +++ b/deploy-manage/deploy/_snippets/installation-order.md @@ -0,0 +1,10 @@ +If you're deploying the {{stack}} in a self-managed cluster, then install the {{stack}} products you want to use in the following order: + +* {{es}} +* {{kib}} +* [Logstash](logstash://reference/index.md) +* [{{agent}}](/reference/ingestion-tools/fleet.md) or [Beats](asciidocalypse://docs/beats/docs/reference/index.md) +* 
[APM](/solutions/observability/apps/application-performance-monitoring-apm.md) +* [Elasticsearch Hadoop](elasticsearch-hadoop://reference/index.md) + +Installing in this order ensures that the components each product depends on are in place. \ No newline at end of file diff --git a/deploy-manage/deploy/_snippets/stack-version-compatibility.md b/deploy-manage/deploy/_snippets/stack-version-compatibility.md new file mode 100644 index 0000000000..04d400a082 --- /dev/null +++ b/deploy-manage/deploy/_snippets/stack-version-compatibility.md @@ -0,0 +1,3 @@ +When installing the {{stack}}, you must use the same version across the entire stack. For example, if you are using {{es}} {{stack-version}}, you install Beats {{stack-version}}, APM Server {{stack-version}}, Elasticsearch Hadoop {{stack-version}}, {{kib}} {{stack-version}}, and Logstash {{stack-version}}. + +If you’re upgrading an existing installation, see [](/deploy-manage/upgrade.md) for information about how to ensure compatibility with {{stack-version}}. \ No newline at end of file diff --git a/deploy-manage/deploy/cloud-enterprise/configure-host-ubuntu.md b/deploy-manage/deploy/cloud-enterprise/configure-host-ubuntu.md index fd98d15e69..37d4362d31 100644 --- a/deploy-manage/deploy/cloud-enterprise/configure-host-ubuntu.md +++ b/deploy-manage/deploy/cloud-enterprise/configure-host-ubuntu.md @@ -238,7 +238,7 @@ You can specify `--log-opt max-size` and `--log-opt max-file` to define the Dock 1. Update `/etc/systemd/system/docker.service.d/docker.conf`. If the file path and file do not exist, create them first. 
- ```sh + ```ini [Unit] Description=Docker Service After=multi-user.target diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 27e81507f2..c61f8fb0d1 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -3,6 +3,9 @@ mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/dependencies-versions.html sub: stack-version: "9.0.0" +applies_to: + deployment: + self: --- # Self-managed cluster diff --git a/deploy-manage/deploy/self-managed/_snippets/dedicated-hosts.md b/deploy-manage/deploy/self-managed/_snippets/dedicated-hosts.md new file mode 100644 index 0000000000..733335ca33 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/dedicated-hosts.md @@ -0,0 +1 @@ +In production, we recommend you run {{es}} on a dedicated host or as a primary service. Several {{es}} features, such as automatic JVM heap sizing, assume that {{es}} is the only resource-intensive application on the host or container. For example, you might run {{metricbeat}} alongside {{es}} for cluster statistics, but a resource-heavy {{ls}} deployment should be on its own host. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md index aa453065ac..20f9d9f225 100644 --- a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md @@ -26,4 +26,6 @@ To enroll new nodes in your cluster, create an enrollment token with the `elasti config{{slash}}certs ``` -3. Repeat the previous step for any new nodes that you want to enroll. \ No newline at end of file +3. Repeat the previous step for any new nodes that you want to enroll. 
+ +For more information about discovery and shard allocation, refer to [Discovery and cluster formation](../distributed-architecture/discovery-cluster-formation.md) and [Cluster-level shard allocation and routing settings](elasticsearch://reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md). \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/access-kibana.md b/deploy-manage/deploy/self-managed/access-kibana.md index 8e4adb882e..633d97196d 100644 --- a/deploy-manage/deploy/self-managed/access-kibana.md +++ b/deploy-manage/deploy/self-managed/access-kibana.md @@ -15,6 +15,14 @@ Access {{kib}} through the web application on port 5601. 3. Go to the home page, then click **{{kib}}**. 4. To make the {{kib}} page your landing page, click **Make this my landing page**. +## Grant other users access to {{kib}} + +{{kib}} leverages {{es}} authentication and authorization technologies to secure access. + +To learn about authentication options, refer to [](/deploy-manage/users-roles/cluster-or-deployment-auth/user-authentication.md). + +To learn how to enable authentication providers for {{kib}}, refer to [](/deploy-manage/users-roles/cluster-or-deployment-auth/kibana-authentication.md). + ## Resources * [Troubleshoot: Check {{kib}} server status](/troubleshoot/kibana/access.md) diff --git a/deploy-manage/deploy/self-managed/configure-kibana.md b/deploy-manage/deploy/self-managed/configure-kibana.md index 7481936313..8446300159 100644 --- a/deploy-manage/deploy/self-managed/configure-kibana.md +++ b/deploy-manage/deploy/self-managed/configure-kibana.md @@ -26,4 +26,10 @@ Environment variables can be injected into configuration using `${MY_ENV_VAR}` s ## Available settings -For a complete list of settings that you can apply to {{kib}}, refer to [{{kib}} configuration reference](kibana:///reference/configuration-reference.md). 
\ No newline at end of file +For a complete list of settings that you can apply to {{kib}}, refer to [{{kib}} configuration reference](kibana://reference/configuration-reference.md). + + +* Link to areas to configure SSL certificates to encrypt client browsers communications (HTTPS) --> This is a bit unclear and difficult as the HTTPS endpoint configuration in Kibana appears in Elasticsearch documentation. +* Link to "Secure access to Kibana" elastic.co/guide/en/kibana/current/tutorial-secure-access-to-kibana.html +* Link to Use Kibana in production (with load balancers): elastic.co/guide/en/kibana/current/production.html +* Link to doc about using more than 1 Kibana instance? (not sure if it exists though) \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/deploy-cluster.md b/deploy-manage/deploy/self-managed/deploy-cluster.md index 70953e689f..8395d3cc8e 100644 --- a/deploy-manage/deploy/self-managed/deploy-cluster.md +++ b/deploy-manage/deploy/self-managed/deploy-cluster.md @@ -1,35 +1,105 @@ --- mapped_urls: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro-deploy.html - https://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html - https://www.elastic.co/guide/en/elastic-stack/current/installing-elastic-stack.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html + - https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-stack-security.html +sub: + stack-version: "9.0.0" +applies_to: + deployment: + self: --- # Deploy an {{es}} cluster -% What needs to be done: Refine +% Doesn't exist +% * [/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md](/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md) -% GitHub issue: https://github.com/elastic/docs-projects/issues/340 -% Scope notes: Work with the previous content to explain the different options to install {{es}} and Kibana, remove 
the references to cloud based installation. cover ES + kibana - install of other stack components should be taken care of in that content set hints about install order (First ES then Kib). Add an introduction also to the installation methods (locally, production, multiple OSs). +This section includes information on how to set up {{es}} and get it running, including: -% Use migrated content from existing pages that map to this page: +* [Configuring your system to support {{es}}](/deploy-manage/deploy/self-managed/important-system-configuration.md), and the [bootstrap checks](/deploy-manage/deploy/self-managed/bootstrap-checks.md) that are run at startup to verify these configurations +* Downloading, installing, and starting {{es}} using each [supported installation method](#installation-methods) -% - [ ] ./raw-migrated-files/elasticsearch/elasticsearch-reference/elasticsearch-intro-deploy.md -% - [ ] ./raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md -% - [ ] ./raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md -% Notes: 1-5 +To quickly set up {{es}} and {{kib}} in Docker for local development or testing, jump to [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). -$$$dedicated-host$$$ +## Installation methods -**This page is a work in progress.** The documentation team is working to combine content pulled from the following pages: +If you want to install and manage {{es}} yourself, you can: -* [/raw-migrated-files/elasticsearch/elasticsearch-reference/elasticsearch-intro-deploy.md] -* [/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md](/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md) +* Run {{es}} using a [Linux, MacOS, or Windows install package](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). 
+* Run {{es}} in a [Docker container](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). -% Doesn't exist -% * [/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md](/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md) +::::{tip} +To try out {{stack}} on your own machine, we recommend using Docker and running both {{es}} and {{kib}}. For more information, see [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). This setup is not suitable for production use. +:::: + +::::{admonition} Use dedicated hosts +:::{include} _snippets/dedicated-hosts.md +::: +:::: + +### {{es}} install packages [elasticsearch-install-packages] + +{{es}} is provided in the following package formats. + +Each linked guide provides the following details: + +* Download and installation instructions +* Information on enrolling a newly installed node in an existing cluster +* Instructions on starting {{es}} manually and, if applicable, as a service or daemon +* Instructions on connecting clients to your new cluster +* Archive or package contents information +* Security certificate and key information + +Before you start, make sure that you [configure your system](/deploy-manage/deploy/self-managed/important-system-configuration.md). + +| Format | Description | Instructions | +| --- | --- | --- | +| Linux and MacOS `tar.gz` archives | The `tar.gz` archives are available for installation on any Linux distribution and MacOS. | [Install {{es}} from archive on Linux or MacOS](/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md) | +| Windows `.zip` archive | The `zip` archive is suitable for installation on Windows. | [Install {{es}} with `.zip` on Windows](/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md) | +| `deb` | The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. 
Debian packages can be downloaded from the {{es}} website or from our Debian repository. | [Install {{es}} with Debian Package](/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md) | +| `rpm` | The `rpm` package is suitable for installation on Red Hat, CentOS, SLES, OpenSuSE and other RPM-based systems. RPM packages can be downloaded from the {{es}} website or from our RPM repository. | [Install {{es}} with RPM](/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md) | + +### {{es}} container images [elasticsearch-docker-images] + +You can also run {{es}} inside a Docker container image. Docker container images may be downloaded from the Elastic Docker Registry. + +[Install {{es}} with Docker](/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md) + +## Version compatibility + +:::{include} /deploy-manage/deploy/_snippets/stack-version-compatibility.md +::: + +## Installation order + +:::{include} /deploy-manage/deploy/_snippets/installation-order.md +::: + +## Supported operating systems and JVMs [supported-platforms] + +The matrix of officially supported operating systems and JVMs is available in the [Elastic Support Matrix](https://elastic.co/support/matrix). {{es}} is tested on the listed platforms, but it is possible that it will work on other platforms too. + +### Java (JVM) Version [jvm-version] + +{{es}} is built using Java, and includes a bundled version of [OpenJDK](https://openjdk.java.net) within each distribution. We strongly recommend using the bundled JVM in all installations of {{es}}. + +The bundled JVM is treated the same as any other dependency of {{es}} in terms of support and maintenance. This means that Elastic takes responsibility for keeping it up to date, and reacts to security issues and bug reports as needed to address vulnerabilities and other bugs in {{es}}. 
Elastic’s support of the bundled JVM is subject to Elastic’s [support policy](https://www.elastic.co/support_policy) and [end-of-life schedule](https://www.elastic.co/support/eol) and is independent of the support policy and end-of-life schedule offered by the original supplier of the JVM. Elastic does not support using the bundled JVM for purposes other than running {{es}}. + +::::{tip} +{{es}} uses only a subset of the features offered by the JVM. Bugs and security issues in the bundled JVM often relate to features that {{es}} does not use. Such issues do not apply to {{es}}. Elastic analyzes reports of security vulnerabilities in all its dependencies, including in the bundled JVM, and will issue an [Elastic Security Advisory](https://www.elastic.co/community/security) if such an advisory is needed. +:::: + + +If you decide to run {{es}} using a version of Java that is different from the bundled one, prefer to use the latest release of a [LTS version of Java](https://www.oracle.com/technetwork/java/eol-135779.html) which is [listed in the support matrix](https://elastic.co/support/matrix). Although such a configuration is supported, if you encounter a security issue or other bug in your chosen JVM then Elastic may not be able to help unless the issue is also present in the bundled JVM. Instead, you must seek assistance directly from the supplier of your chosen JVM. You must also take responsibility for reacting to security and bug announcements from the supplier of your chosen JVM. {{es}} may not perform optimally if using a JVM other than the bundled one. {{es}} is closely coupled to certain OpenJDK-specific features, so it may not work correctly with JVMs that are not OpenJDK. {{es}} will refuse to start if you attempt to use a known-bad JVM version. + +To use your own version of Java, set the `ES_JAVA_HOME` environment variable to the path to your own JVM installation. The bundled JVM is located within the `jdk` subdirectory of the {{es}} home directory. 
You may remove this directory if using your own JVM. +:::{warning} +Don’t use third-party Java agents that attach to the JVM. These agents can reduce {{es}} performance, including freezing or crashing nodes. +::: ## Third-party dependencies [dependencies-versions] diff --git a/deploy-manage/deploy/self-managed/important-settings-configuration.md b/deploy-manage/deploy/self-managed/important-settings-configuration.md index 29102ed240..5d93986172 100644 --- a/deploy-manage/deploy/self-managed/important-settings-configuration.md +++ b/deploy-manage/deploy/self-managed/important-settings-configuration.md @@ -53,10 +53,10 @@ path: logs: "C:\\Elastic\\Elasticsearch\\logs" ``` :::::: +::::::: {{es}} offers a deprecated setting that allows you to specify multiple paths in `path.data`. To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/path.md#multiple-data-paths). -::::::: ::::{warning} * Don’t modify anything within the data directory or run processes that might interfere with its contents. @@ -65,8 +65,6 @@ path: * Don’t run virus scanners on the data directory. A virus scanner can prevent {{es}} from working correctly and may modify the contents of the data directory. The data directory contains no executables so a virus scan will only find false positives. :::: -Elasticsearch offers a deprecated setting that allows you to specify multiple paths in `path.data`. To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](elasticsearch://reference/elasticsearch/index-settings/path.md#multiple-data-paths). 
- ## Cluster name setting [_cluster_name_setting] diff --git a/deploy-manage/deploy/self-managed/important-system-configuration.md b/deploy-manage/deploy/self-managed/important-system-configuration.md index c027afdd98..8e2438f8a3 100644 --- a/deploy-manage/deploy/self-managed/important-system-configuration.md +++ b/deploy-manage/deploy/self-managed/important-system-configuration.md @@ -22,12 +22,23 @@ The following settings **must** be considered before going to production: * [](system-config-tcpretries.md) (Linux only) :::{tip} -For examples of applying these settings in a Docker environment, refer to [](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md). +For examples of applying the relevant settings in a Docker environment, refer to [](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md). ::: -## Development mode vs. production mode [dev-vs-prod] +::::{admonition} Use dedicated hosts +:::{include} _snippets/dedicated-hosts.md +::: +:::: + +## Bootstrap checks + +{{es}} has bootstrap checks that run at startup to ensure that users have configured all important settings. + +For a list of the checks and their meaning, refer to [](/deploy-manage/deploy/self-managed/bootstrap-checks.md). + +### Development mode vs. production mode [dev-vs-prod] -By default, {{es}} assumes that you are working in development mode. If any of the above settings are not configured correctly, a warning will be written to the log file, but you will be able to start and run your {{es}} node. +By default, {{es}} assumes that you are working in development mode. If any of the above settings are not configured correctly, the relevant bootstrap check will fail and a warning will be written to the log file, but you will be able to start and run your {{es}} node. As soon as you configure a network setting like `network.host`, {{es}} assumes that you are moving to production and will upgrade the above warnings to exceptions. 
These exceptions will prevent your {{es}} node from starting. This is an important safety measure to ensure that you will not lose data because of a misconfigured server. diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index f6750c2426..99fbf4a34d 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -7,7 +7,7 @@ sub: export: "export " escape: "\\" stack-version: "9.0.0" -navigation_title: "Linux or MacOS" +navigation_title: "Install on Linux or MacOS" applies_to: deployment: self: @@ -151,12 +151,12 @@ This is convenient because you don’t have to create any directories to start u | --- | --- | --- | --- | | home | {{es}} home directory or `$ES_HOME` | Directory created by unpacking the archive | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `$ES_HOME/bin` | | -| conf | Configuration files including `elasticsearch.yml` | `$ES_HOME/config` | `[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files, including `elasticsearch.yml` | `$ES_HOME/config` | [`ES_PATH_CONF`](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location) | | conf | Generated TLS keys and certificates for the transport and HTTP layer. | `$ES_HOME/config/certs` | | -| data | The location of the data files of each index / shard allocated on the node. | `$ES_HOME/data` | `path.data` | -| logs | Log files location. | `$ES_HOME/logs` | `path.logs` | +| data | The location of the data files of each index / shard allocated on the node. 
| `$ES_HOME/data` | [`path.data`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | +| logs | Log files location. | `$ES_HOME/logs` | [`path.logs`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `$ES_HOME/plugins` | | -| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | +| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | [`path.repo`](/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md) | ### Security certificates and keys [security_certificates_and_keys] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index f33d08a609..a82940033e 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -8,7 +8,7 @@ sub: export: "export " escape: "\\" stack-version: "9.0.0" -navigation_title: Debian +navigation_title: Install with Debian package applies_to: deployment: self: @@ -175,14 +175,14 @@ The Debian package places config files, logs, and the data directory in the appr | --- | --- | --- | --- | | home | {{es}} home directory or `$ES_HOME` | `/usr/share/elasticsearch` | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `/usr/share/elasticsearch/bin` | | -| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | 
`[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | [`ES_PATH_CONF`](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location) | | conf | Environment variables including heap size, file descriptors. | `/etc/default/elasticsearch` | | | conf | Generated TLS keys and certificates for the transport and http layer. | `/etc/elasticsearch/certs` | | -| data | The location of the data files of each index / shard allocated on the node. | `/var/lib/elasticsearch` | `path.data` | +| data | The location of the data files of each index / shard allocated on the node. | `/var/lib/elasticsearch` | [`path.data`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | | jdk | The bundled Java Development Kit used to run {{es}}. Can be overridden by setting the `ES_JAVA_HOME` environment variable in `/etc/default/elasticsearch`. | `/usr/share/elasticsearch/jdk` | | -| logs | Log files location. | `/var/log/elasticsearch` | `path.logs` | +| logs | Log files location. | `/var/log/elasticsearch` | [`path.logs`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/elasticsearch/plugins` | | -| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | +| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. 
| Not configured | [`path.repo`](/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md) | ### Security certificates and keys [_security_certificates_and_keys] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 1a65e65ac5..bf6a26ac91 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -1,7 +1,7 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html -navigation_title: "Docker" +navigation_title: "Install with Docker" sub: stack-version: "9.0.0" applies_to: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 27d5d6af27..db3e205ff6 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -8,7 +8,7 @@ sub: export: "export" escape: "\\" stack-version: "9.0.0" -navigation_title: "RPM" +navigation_title: "Install with RPM package" applies_to: deployment: self: @@ -166,14 +166,14 @@ The RPM places config files, logs, and the data directory in the appropriate loc | --- | --- | --- | --- | | home |{{es}} home directory or `$ES_HOME` | `/usr/share/elasticsearch` | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `/usr/share/elasticsearch/bin` | | -| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | `[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files including `elasticsearch.yml` | `/etc/elasticsearch` | [`ES_PATH_CONF`](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location) | | conf | 
Environment variables including heap size, file descriptors. | `/etc/sysconfig/elasticsearch` | | | conf | Generated TLS keys and certificates for the transport and http layer. | `/etc/elasticsearch/certs` | | -| data | The location of the data files of each index / shard allocated on the node. | `/var/lib/elasticsearch` | `path.data` | +| data | The location of the data files of each index / shard allocated on the node. | `/var/lib/elasticsearch` | [`path.data`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | | jdk | The bundled Java Development Kit used to run {{es}}. Can be overridden by setting the `ES_JAVA_HOME` environment variable in `/etc/sysconfig/elasticsearch`. | `/usr/share/elasticsearch/jdk` | | -| logs | Log files location. | `/var/log/elasticsearch` | `path.logs` | +| logs | Log files location. | `/var/log/elasticsearch` | [`path.logs`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/elasticsearch/plugins` | | -| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | +| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. 
| Not configured | [`path.repo`](/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md) | ### Security certificates and keys [_security_certificates_and_keys] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index d73ffdfef9..0b51e1e9e2 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -7,7 +7,7 @@ sub: export: "$" escape: "^" stack-version: "9.0.0" -navigation_title: Windows +navigation_title: Install on Windows applies_to: deployment: self: @@ -224,12 +224,12 @@ This is very convenient because you don’t have to create any directories to st | --- | --- | --- | --- | | home | {{es}} home directory or `%ES_HOME%` | Directory created by unpacking the archive | | | bin | Binary scripts including `elasticsearch` to start a node and `elasticsearch-plugin` to install plugins | `%ES_HOME%\bin` | | -| conf | Configuration files including `elasticsearch.yml` | `%ES_HOME%\config` | `[ES_PATH_CONF](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location)` | +| conf | Configuration files including `elasticsearch.yml` | `%ES_HOME%\config` | [`ES_PATH_CONF`](/deploy-manage/deploy/self-managed/configure-elasticsearch.md#config-files-location) | | conf | Generated TLS keys and certificates for the transport and HTTP layer. | `%ES_HOME%\config\certs` | | -| data | The location of the data files of each index / shard allocated on the node. | `%ES_HOME%\data` | `path.data` | -| logs | Log files location. | `%ES_HOME%\logs` | `path.logs` | +| data | The location of the data files of each index / shard allocated on the node. | `%ES_HOME%\data` | [`path.data`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | +| logs | Log files location. 
| `%ES_HOME%\logs` | [`path.logs`](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `%ES_HOME%\plugins` | | -| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | `path.repo` | +| repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. | Not configured | [`path.repo`](/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md) | diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index 0f1fc7bbd2..f9a8c95cb5 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -5,6 +5,8 @@ mapped_urls: applies_to: deployment: self: +sub: + stack-version: "9.0.0" --- # Install {{kib}} @@ -61,10 +63,17 @@ If your {{es}} installation is protected by [{{stack-security-features}}](/deplo ## {{es}} version [elasticsearch-version] -{{kib}} should be configured to run against an {{es}} node of the same version. This is the officially supported configuration. +:::{include} /deploy-manage/deploy/_snippets/stack-version-compatibility.md +::: Running different major version releases of {{kib}} and {{es}} (e.g. {{kib}} 9.x and {{es}} 8.x) is not supported, nor is running a minor version of {{kib}} that is newer than the version of {{es}} (e.g. {{kib}} 8.14 and {{es}} 8.13). Running a minor version of {{es}} that is higher than {{kib}} will generally work in order to facilitate an upgrade process where {{es}} is upgraded first (e.g. {{kib}} 8.14 and {{es}} 8.15). 
In this configuration, a warning will be logged on {{kib}} server startup, so it’s only meant to be temporary until {{kib}} is upgraded to the same version as {{es}}. -Running different patch version releases of {{kib}} and {{es}} (e.g. {{kib}} 9.0.0 and {{es}} 9.0.1) is generally supported, though we encourage users to run the same versions of {{kib}} and {{es}} down to the patch version. \ No newline at end of file +Running different patch version releases of {{kib}} and {{es}} (e.g. {{kib}} 9.0.0 and {{es}} 9.0.1) is generally supported, though we encourage users to run the same versions of {{kib}} and {{es}} down to the patch version. + + +## Installation order + +:::{include} /deploy-manage/deploy/_snippets/installation-order.md +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md deleted file mode 100644 index 4034dce077..0000000000 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -mapped_urls: - - https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html - - https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-stack-security.html ---- - -# Install {{es}} [install-elasticsearch] - -If you want to install and manage {{es}} yourself, you can: - -* Run {{es}} using a [Linux, MacOS, or Windows install package](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). -* Run {{es}} in a [Docker container](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). - -::::{tip} -To try out on your own machine, we recommend using Docker and running both {{es}} and Kibana. For more information, see [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). This setup is not suitable for production use. 
-:::: - -## {{es}} install packages [elasticsearch-install-packages] - -{{es}} is provided in the following package formats: - -Linux and MacOS `tar.gz` archives -: The `tar.gz` archives are available for installation on any Linux distribution and MacOS. - - [Install {{es}} from archive on Linux or MacOS](/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md) - - -Windows `.zip` archive -: The `zip` archive is suitable for installation on Windows. - - [Install {{es}} with `.zip` on Windows](/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md) - - -`deb` -: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages can be downloaded from the {{es}} website or from our Debian repository. - - [Install {{es}} with Debian Package](/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md) - - -`rpm` -: The `rpm` package is suitable for installation on Red Hat, Centos, SLES, OpenSuSE and other RPM-based systems. RPM packages can be downloaded from the {{es}} website or from our RPM repository. - - [Install {{es}} with RPM](/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md) - -## {{es}} container images [elasticsearch-docker-images] - -You can also run {{es}} inside a docket container image. Docker container images may be downloaded from the Elastic Docker Registry. - -[Install {{es}} with Docker](/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md) - -## Java (JVM) Version [jvm-version] - -{{es}} is built using Java, and includes a bundled version of [OpenJDK](https://openjdk.java.net) within each distribution. We strongly recommend using the bundled JVM in all installations of {{es}}. - -The bundled JVM is treated the same as any other dependency of {{es}} in terms of support and maintenance. 
This means that Elastic takes responsibility for keeping it up to date, and reacts to security issues and bug reports as needed to address vulnerabilities and other bugs in {{es}}. Elastic’s support of the bundled JVM is subject to Elastic’s [support policy](https://www.elastic.co/support_policy) and [end-of-life schedule](https://www.elastic.co/support/eol) and is independent of the support policy and end-of-life schedule offered by the original supplier of the JVM. Elastic does not support using the bundled JVM for purposes other than running {{es}}. - -::::{tip} -{{es}} uses only a subset of the features offered by the JVM. Bugs and security issues in the bundled JVM often relate to features that {{es}} does not use. Such issues do not apply to {{es}}. Elastic analyzes reports of security vulnerabilities in all its dependencies, including in the bundled JVM, and will issue an [Elastic Security Advisory](https://www.elastic.co/community/security) if such an advisory is needed. -:::: - - -If you decide to run {{es}} using a version of Java that is different from the bundled one, prefer to use the latest release of a [LTS version of Java](https://www.oracle.com/technetwork/java/eol-135779.html) which is [listed in the support matrix](https://elastic.co/support/matrix). Although such a configuration is supported, if you encounter a security issue or other bug in your chosen JVM then Elastic may not be able to help unless the issue is also present in the bundled JVM. Instead, you must seek assistance directly from the supplier of your chosen JVM. You must also take responsibility for reacting to security and bug announcements from the supplier of your chosen JVM. {{es}} may not perform optimally if using a JVM other than the bundled one. {{es}} is closely coupled to certain OpenJDK-specific features, so it may not work correctly with JVMs that are not OpenJDK. {{es}} will refuse to start if you attempt to use a known-bad JVM version. 
- -To use your own version of Java, set the `ES_JAVA_HOME` environment variable to the path to your own JVM installation. The bundled JVM is located within the `jdk` subdirectory of the {{es}} home directory. You may remove this directory if using your own JVM. - - -## JVM and Java agents [jvm-agents] - -Don’t use third-party Java agents that attach to the JVM. These agents can reduce {{es}} performance, including freezing or crashing nodes. diff --git a/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md b/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md index 48ede71a67..8b56c3a546 100644 --- a/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md +++ b/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md @@ -1,7 +1,10 @@ --- -navigation_title: "Run {{es}} locally" +navigation_title: "Local installation (quickstart)" mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html +applies_to: + deployment: + self: --- diff --git a/deploy-manage/deploy/self-managed/setting-system-settings.md b/deploy-manage/deploy/self-managed/setting-system-settings.md index 6df1f96f2c..13f9f401ff 100644 --- a/deploy-manage/deploy/self-managed/setting-system-settings.md +++ b/deploy-manage/deploy/self-managed/setting-system-settings.md @@ -63,13 +63,12 @@ Ubuntu ignores the `limits.conf` file for processes started by `init.d`. 
To enab ## Sysconfig file [sysconfig] -When using the RPM or Debian packages, environment variables can be specified in the system configuration file, which is located in: +When using the RPM or Debian packages, environment variables can be specified in the system configuration file, whose location depends on your package type: -RPM -: `/etc/sysconfig/elasticsearch` - -Debian -: `/etc/default/elasticsearch` +| Package type | Location | +| --- | --- | +| RPM | `/etc/sysconfig/elasticsearch` | +| Debian | `/etc/default/elasticsearch` | However, system limits need to be specified via [systemd](#systemd). @@ -82,7 +81,7 @@ The systemd service file (`/usr/lib/systemd/system/elasticsearch.service`) conta To override them, add a file called `/etc/systemd/system/elasticsearch.service.d/override.conf` (alternatively, you may run `sudo systemctl edit elasticsearch` which opens the file automatically inside your default editor). Set any changes in this file, such as: -```sh +```ini [Service] LimitMEMLOCK=infinity ``` diff --git a/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md b/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md index 175a087eae..a618d51dad 100644 --- a/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md +++ b/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md @@ -43,37 +43,8 @@ To add a node to a cluster running on multiple machines, you must also set [`dis :::: -When {{es}} starts for the first time, the security auto-configuration process binds the HTTP layer to `0.0.0.0`, but only binds the transport layer to localhost. This intended behavior ensures that you can start a single-node cluster with security enabled by default without any additional configuration. - -Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters.
During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. - -Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. - -To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. - -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](elasticsearch://reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. - - ```sh - bin\elasticsearch-create-enrollment-token -s node - ``` - - Copy the enrollment token, which you’ll use to enroll new nodes with your {{es}} cluster. - -2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. - - ```sh - bin\elasticsearch --enrollment-token - ``` - - {{es}} automatically generates certificates and keys in the following directory: - - ```sh - config\certs - ``` - -3. Repeat the previous step for any new nodes that you want to enroll. 
- -For more information about discovery and shard allocation, refer to [*Discovery and cluster formation*](../distributed-architecture/discovery-cluster-formation.md) and [Cluster-level shard allocation and routing settings](elasticsearch://reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md). +:::{include} deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md +::: ## Master-eligible nodes [add-elasticsearch-nodes-master-eligible] diff --git a/deploy-manage/toc.yml b/deploy-manage/toc.yml index 0026cabc38..9520db4c66 100644 --- a/deploy-manage/toc.yml +++ b/deploy-manage/toc.yml @@ -292,6 +292,7 @@ toc: children: - file: deploy/self-managed/deploy-cluster.md children: + - file: deploy/self-managed/local-development-installation-quickstart.md - file: deploy/self-managed/important-system-configuration.md children: - file: deploy/self-managed/setting-system-settings.md @@ -302,20 +303,18 @@ toc: - file: deploy/self-managed/networkaddress-cache-ttl.md - file: deploy/self-managed/executable-jna-tmpdir.md - file: deploy/self-managed/system-config-tcpretries.md - - file: deploy/self-managed/installing-elasticsearch.md - children: - - file: deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md - - file: deploy/self-managed/install-elasticsearch-with-zip-on-windows.md - - file: deploy/self-managed/install-elasticsearch-with-debian-package.md - - file: deploy/self-managed/install-elasticsearch-with-rpm.md - - file: deploy/self-managed/install-elasticsearch-with-docker.md - children: - - file: deploy/self-managed/install-elasticsearch-docker-basic.md - - file: deploy/self-managed/install-elasticsearch-docker-compose.md - - file: deploy/self-managed/install-elasticsearch-docker-prod.md - - file: deploy/self-managed/install-elasticsearch-docker-configure.md - - file: deploy/self-managed/local-development-installation-quickstart.md - - file: deploy/self-managed/bootstrap-checks.md + - file: 
deploy/self-managed/bootstrap-checks.md + - file: deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md + - file: deploy/self-managed/install-elasticsearch-with-zip-on-windows.md + - file: deploy/self-managed/install-elasticsearch-with-debian-package.md + - file: deploy/self-managed/install-elasticsearch-with-rpm.md + - file: deploy/self-managed/install-elasticsearch-with-docker.md + children: + - file: deploy/self-managed/install-elasticsearch-docker-basic.md + - file: deploy/self-managed/install-elasticsearch-docker-compose.md + - file: deploy/self-managed/install-elasticsearch-docker-prod.md + - file: deploy/self-managed/install-elasticsearch-docker-configure.md + - file: deploy/self-managed/configure-elasticsearch.md children: - file: deploy/self-managed/important-settings-configuration.md diff --git a/get-started/deployment-options.md b/get-started/deployment-options.md index 1c467a481d..39eb4df286 100644 --- a/get-started/deployment-options.md +++ b/get-started/deployment-options.md @@ -1,3 +1,8 @@ +--- +mapped_urls: + - https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro-deploy.html +--- + # Deployment options [elasticsearch-intro-deploy] Use this page for a quick overview of your options for deploying Elastic. diff --git a/get-started/the-stack.md b/get-started/the-stack.md index e9445a02e3..2daa664da6 100644 --- a/get-started/the-stack.md +++ b/get-started/the-stack.md @@ -5,6 +5,8 @@ mapped_urls: - https://www.elastic.co/guide/en/kibana/current/index.html - https://www.elastic.co/guide/en/elastic-stack/current/installing-elastic-stack.html - https://www.elastic.co/guide/en/elastic-stack/current/overview.html +sub: + stack-version: "9.0.0" --- # The {{stack}} @@ -88,3 +90,20 @@ $$$stack-components-elasticsearch-clients$$$ {{es}} clients : The clients provide a convenient mechanism to manage API requests and responses to and from {{es}} from popular languages such as Java, Ruby, Go, Python, and others. 
Both official and community contributed clients are available. [Learn more about the {{es}} clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html). +## Version compatibility +```{applies_to} +deployment: + self: +``` + +:::{include} /deploy-manage/deploy/_snippets/stack-version-compatibility.md +::: + +## Installation order +```{applies_to} +deployment: + self: +``` + +:::{include} /deploy-manage/deploy/_snippets/installation-order.md +::: \ No newline at end of file diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md deleted file mode 100644 index 35776fce6c..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/setup.md +++ /dev/null @@ -1,18 +0,0 @@ -# Set up {{es}} [setup] - -This section includes information on how to setup Elasticsearch and get it running, including: - -* Downloading -* Installing -* Starting -* Configuring - - -## Supported platforms [supported-platforms] - -The matrix of officially supported operating systems and JVMs is available here: [Support Matrix](https://elastic.co/support/matrix). Elasticsearch is tested on the listed platforms, but it is possible that it will work on other platforms too. - - -## Use dedicated hosts [dedicated-host] - -In production, we recommend you run {{es}} on a dedicated host or as a primary service. Several {{es}} features, such as automatic JVM heap sizing, assume it’s the only resource-intensive application on the host or container. For example, you might run {{metricbeat}} alongside {{es}} for cluster statistics, but a resource-heavy {{ls}} deployment should be on its own host. 
diff --git a/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md b/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md index c7540fe976..5097f6beea 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md +++ b/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md @@ -233,7 +233,7 @@ podman create \ The following is an example of an actual SystemD service file for an EPR, launched as a Podman service. -```shell +```ini # container-elastic-epr.service # autogenerated by Podman 4.1.1 # Wed Oct 19 13:12:33 UTC 2022 diff --git a/reference/ingestion-tools/fleet/host-proxy-env-vars.md b/reference/ingestion-tools/fleet/host-proxy-env-vars.md index 61aabc9ad8..6d92b6291b 100644 --- a/reference/ingestion-tools/fleet/host-proxy-env-vars.md +++ b/reference/ingestion-tools/fleet/host-proxy-env-vars.md @@ -45,7 +45,7 @@ The location where you set these environment variables is platform-specific and Then add the environment variables under `[Service]` - ```shell + ```ini [Service] Environment="HTTPS_PROXY=https://my.proxy:8443" diff --git a/solutions/observability/apps/apm-server-systemd.md b/solutions/observability/apps/apm-server-systemd.md index 24eb7bf7bb..6ecdc705d1 100644 --- a/solutions/observability/apps/apm-server-systemd.md +++ b/solutions/observability/apps/apm-server-systemd.md @@ -74,7 +74,7 @@ To override these variables, create a drop-in unit file in the `/etc/systemd/sys For example a file with the following content placed in `/etc/systemd/system/apm-server.service.d/debug.conf` would override `BEAT_LOG_OPTS` to enable debug for {{es}} output. 
-```text +```ini [Service] Environment="BEAT_LOG_OPTS=-d elasticsearch" ``` From 82d1037f131b77729277cdf47ac4b787e212d894 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 10:36:56 -0400 Subject: [PATCH 16/43] more --- deploy-manage/deploy/self-managed/access-kibana.md | 3 +++ deploy-manage/deploy/self-managed/configure-kibana.md | 3 +++ deploy-manage/deploy/self-managed/deploy-cluster.md | 2 ++ 3 files changed, 8 insertions(+) diff --git a/deploy-manage/deploy/self-managed/access-kibana.md b/deploy-manage/deploy/self-managed/access-kibana.md index 633d97196d..bfb6045a5d 100644 --- a/deploy-manage/deploy/self-managed/access-kibana.md +++ b/deploy-manage/deploy/self-managed/access-kibana.md @@ -1,6 +1,9 @@ --- mapped_pages: - https://www.elastic.co/guide/en/kibana/current/access.html +applies_to: + deployment: + self: --- # Access {{kib}} [access] diff --git a/deploy-manage/deploy/self-managed/configure-kibana.md b/deploy-manage/deploy/self-managed/configure-kibana.md index 8446300159..fa66c81310 100644 --- a/deploy-manage/deploy/self-managed/configure-kibana.md +++ b/deploy-manage/deploy/self-managed/configure-kibana.md @@ -1,6 +1,9 @@ --- mapped_pages: - https://www.elastic.co/guide/en/kibana/current/settings.html +applies_to: + deployment: + self: --- # Configure {{kib}} [settings] diff --git a/deploy-manage/deploy/self-managed/deploy-cluster.md b/deploy-manage/deploy/self-managed/deploy-cluster.md index 8395d3cc8e..f731d3fb6f 100644 --- a/deploy-manage/deploy/self-managed/deploy-cluster.md +++ b/deploy-manage/deploy/self-managed/deploy-cluster.md @@ -66,6 +66,8 @@ Before you start, make sure that you [configure your system](/deploy-manage/depl You can also run {{es}} inside a docket container image. Docker container images may be downloaded from the Elastic Docker Registry. +You can [use Docker Compose](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md) to deploy multiple nodes at once. 
+ [Install {{es}} with Docker](/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md) ## Version compatibility From ace2bcff99a8d15453910648dce891b3b66ed5e0 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 11:42:37 -0400 Subject: [PATCH 17/43] clean up some labels --- .../billing/manage-subscription.md | 3 ++- deploy-manage/users-roles.md | 12 +++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/deploy-manage/cloud-organization/billing/manage-subscription.md b/deploy-manage/cloud-organization/billing/manage-subscription.md index 346b95958b..929109a72a 100644 --- a/deploy-manage/cloud-organization/billing/manage-subscription.md +++ b/deploy-manage/cloud-organization/billing/manage-subscription.md @@ -42,7 +42,8 @@ To change your subscription level: ### Feature usage notifications [ec_feature_usage_notifications] :::{applies_to} -:hosted: all +deployment: + ess: all ::: If you try to change your subscription to a lower level, but you are using features that belong either to your current level or to a higher one, you need to make some changes before you can proceed, as described in **Review required feature changes**. 
diff --git a/deploy-manage/users-roles.md b/deploy-manage/users-roles.md index 8059b7bd7e..eafbeb5984 100644 --- a/deploy-manage/users-roles.md +++ b/deploy-manage/users-roles.md @@ -31,7 +31,8 @@ Preventing unauthorized access is only one element of a complete security strate ## Cloud organization level :::{applies_to} -:hosted: all +deployment: + ess: all :serverless: all ::: @@ -89,10 +90,11 @@ As an extension of the [predefined instance access roles](/deploy-manage/users-r ## Cluster or deployment level :::{applies_to} -:ece: all -:hosted: all -:eck: all -:stack: all +deployment: + ece: all + ess: all + eck: all + self: all ::: Set up authentication and authorization at the cluster or deployment level, and learn about the underlying security technologies that Elasticsearch uses to authenticate and authorize requests internally and across services. From 46a4cbb17c9e5493e1042f20f9239f9edcf418a4 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 11:45:07 -0400 Subject: [PATCH 18/43] fix more annotations --- .../billing/manage-subscription.md | 4 ++-- deploy-manage/users-roles.md | 17 +++++++++-------- explore-analyze/find-and-organize/data-views.md | 16 ++++++++-------- .../find-and-organize/saved-objects.md | 6 +++--- 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/deploy-manage/cloud-organization/billing/manage-subscription.md b/deploy-manage/cloud-organization/billing/manage-subscription.md index 929109a72a..45c8717656 100644 --- a/deploy-manage/cloud-organization/billing/manage-subscription.md +++ b/deploy-manage/cloud-organization/billing/manage-subscription.md @@ -41,10 +41,10 @@ To change your subscription level: ### Feature usage notifications [ec_feature_usage_notifications] -:::{applies_to} +```{applies_to} deployment: ess: all -::: +``` If you try to change your subscription to a lower level, but you are using features that belong either to your current level or to a higher one, you need to make some changes before you 
can proceed, as described in **Review required feature changes**. diff --git a/deploy-manage/users-roles.md b/deploy-manage/users-roles.md index eafbeb5984..ddc1ebeb8e 100644 --- a/deploy-manage/users-roles.md +++ b/deploy-manage/users-roles.md @@ -30,11 +30,11 @@ Preventing unauthorized access is only one element of a complete security strate ## Cloud organization level -:::{applies_to} +```{applies_to} deployment: ess: all -:serverless: all +serverless: all -::: +``` If you’re using {{ecloud}}, then you can perform the following tasks to control access to your Cloud organization, your Cloud Hosted deployments, and your Cloud Serverless projects: @@ -52,8 +52,9 @@ For {{ech}} deployments, you can configure SSO at the organization level, the de ## Orchestrator level -:::{applies_to} -:ece: all +```{applies_to} +deployment: + ece: all -::: +``` Control access to your {{ece}} [orchestrator](/deploy-manage/deploy/cloud-enterprise/deploy-an-orchestrator.md) and deployments. @@ -79,9 +80,9 @@ You can't manage users and roles for {{eck}} clusters at the orchestrator level. ## Project level -:::{applies_to} +```{applies_to} -:serverless: all +serverless: all -::: +``` As an extension of the [predefined instance access roles](/deploy-manage/users-roles/cloud-organization/user-roles.md#ec_instance_access_roles) offered for {{serverless-short}} projects, you can create custom roles at the project level to provide more granular control, and provide users with only the access they need within specific projects. @@ -89,13 +90,13 @@ As an extension of the [predefined instance access roles](/deploy-manage/users-r ## Cluster or deployment level -:::{applies_to} +```{applies_to} deployment: ece: all ess: all eck: all self: all -::: +``` Set up authentication and authorization at the cluster or deployment level, and learn about the underlying security technologies that Elasticsearch uses to authenticate and authorize requests internally and across services.
diff --git a/explore-analyze/find-and-organize/data-views.md b/explore-analyze/find-and-organize/data-views.md index b3133aef82..02408ed6b4 100644 --- a/explore-analyze/find-and-organize/data-views.md +++ b/explore-analyze/find-and-organize/data-views.md @@ -101,10 +101,10 @@ Temporary {{data-sources}} are not available in the **Management** menu. ### Use {{data-sources}} with rolled up data [rollup-data-view] -::::{applies_to} -:serverless: unavailable -:stack: deprecated -:::: +```{applies_to} +serverless: unavailable +stack: deprecated +``` ::::{admonition} :class: warning @@ -123,10 +123,10 @@ For an example, refer to [Create and visualize rolled up data](/manage-data/life ### Use {{data-sources}} with {{ccs}} [management-cross-cluster-search] -::::{applies_to} -:serverless: unavailable -:stack: ga -:::: +```{applies_to} +serverless: unavailable +stack: ga +``` If your {{es}} clusters are configured for [{{ccs}}](../../solutions/search/cross-cluster-search.md), you can create a {{data-source}} to search across the clusters of your choosing. Specify data streams, indices, and aliases in a remote cluster using the following syntax: diff --git a/explore-analyze/find-and-organize/saved-objects.md b/explore-analyze/find-and-organize/saved-objects.md index 8786d93446..a8d214b2e7 100644 --- a/explore-analyze/find-and-organize/saved-objects.md +++ b/explore-analyze/find-and-organize/saved-objects.md @@ -126,9 +126,9 @@ However, saved objects can only be imported into the same version, a newer minor | 7.8.1 | 9.0.0 | No | ## Saved Object IDs [saved-object-ids] -::::{applies_to} -:stack: -:::: +```{applies_to} +stack: +``` In the past, many saved object types could have the same ID in different [spaces](/deploy-manage/manage-spaces.md). For example, if you copied dashboard "123" from the one space to another space, the second dashboard would also have an ID of "123". 
While the saved object ID is not something that users would interact with directly, many aspects of {{kib}} rely on it, notably URLs. If you have a "deep link" URL to a saved dashboard, that URL includes the saved object ID. From baa6aa171d6bbafa935a911411574dd469c17118 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 13:05:57 -0400 Subject: [PATCH 19/43] broken window --- explore-analyze/find-and-organize/saved-objects.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/explore-analyze/find-and-organize/saved-objects.md b/explore-analyze/find-and-organize/saved-objects.md index a8d214b2e7..41db53c3d5 100644 --- a/explore-analyze/find-and-organize/saved-objects.md +++ b/explore-analyze/find-and-organize/saved-objects.md @@ -116,9 +116,8 @@ With each release, {{kib}} introduces changes to the way saved objects are store However, saved objects can only be imported into the same version, a newer minor on the same major, or the next major. Exported saved objects are not backward compatible and cannot be imported into an older version of {{kib}}. For example: -| | | | -| --- | --- | --- | | Exporting version | Importing version | Compatible? 
| +| --- | --- | --- | | 8.7.0 | 8.8.1 | Yes | | 7.8.1 | 8.3.0 | Yes | | 8.3.0 | 8.11.1 | Yes | From e5fb206610394ae71395a1cbf06dddd15e7d1b48 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 13:53:55 -0400 Subject: [PATCH 20/43] toc cleanup --- deploy-manage/deploy/self-managed.md | 9 +- .../self-managed/configure-elasticsearch.md | 53 +- .../deploy/self-managed/configure-kibana.md | 14 +- .../docs-content/serverless/spaces.md | 56 -- .../autoscaling-deciders.md | 30 - .../autoscaling-fixed-decider.md | 57 -- .../autoscaling-frozen-existence-decider.md | 6 - .../autoscaling-frozen-shards-decider.md | 10 - .../autoscaling-frozen-storage-decider.md | 12 - .../autoscaling-machine-learning-decider.md | 52 -- .../autoscaling-proactive-storage-decider.md | 39 -- .../autoscaling-reactive-storage-decider.md | 8 - .../change-passwords-native-users.md | 25 - .../snapshot-restore.md | 144 ----- .../snapshots-restore-snapshot.md | 503 ----------------- .../xpack-autoscaling.md | 18 - .../elastic-stack/air-gapped-install.md | 533 ------------------ .../stack-docs/elastic-stack/overview.md | 15 - .../get-elastic.md | 86 --- .../index.md | 3 - raw-migrated-files/toc.yml | 19 - 21 files changed, 24 insertions(+), 1668 deletions(-) delete mode 100644 raw-migrated-files/docs-content/serverless/spaces.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-deciders.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-fixed-decider.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-existence-decider.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-shards-decider.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-storage-decider.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-machine-learning-decider.md delete 
mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-proactive-storage-decider.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-reactive-storage-decider.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/change-passwords-native-users.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/snapshot-restore.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/snapshots-restore-snapshot.md delete mode 100644 raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-autoscaling.md delete mode 100644 raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md delete mode 100644 raw-migrated-files/stack-docs/elastic-stack/overview.md delete mode 100644 raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/get-elastic.md delete mode 100644 raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/index.md diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index c61f8fb0d1..6053cd1dd4 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -81,11 +81,11 @@ Review these other sections for critical information about securing and managing Learn how to secure your Elastic environment to restrict access to only authorized parties, and allow communication between your environment and external parties. * [](/deploy-manage/security.md): Learn about security features that prevent bad actors from tampering with your data, and encrypt communications to, from, and within your cluster. -* [](/deploy-manage/users-roles.md): Set up authentication and authorization for your cluster, and learn about the underlying security technologies that {{es}} uses to authenticate and authorize requests internally and across services. 
+* [](/deploy-manage/users-roles/cluster-or-deployment-auth.md): Set up authentication and authorization for your cluster, and learn about the underlying security technologies that {{es}} uses to authenticate and authorize requests internally and across services. * [](/deploy-manage/manage-spaces.md): Learn how to organize content in {{kib}}, and restrict access to this content to specific users. * [](/deploy-manage/api-keys.md): Authenticate and authorize programmatic access to your deployments and {{es}} resources. * [](/deploy-manage/manage-connectors.md): Manage connection information between Elastic and third-party systems. -* [](/deploy-manage/remote-clusters.md): Enable communication between {{es}} clusters to support [cross-cluster replication](/deploy-manage/tools/cross-cluster-replication.md) and [cross-cluster search](/solutions/search/cross-cluster-search.md). +* [](/deploy-manage/remote-clusters/remote-clusters-self-managed.md): Enable communication between {{es}} clusters to support [cross-cluster replication](/deploy-manage/tools/cross-cluster-replication.md) and [cross-cluster search](/solutions/search/cross-cluster-search.md). ### Administer and maintain @@ -94,11 +94,12 @@ Monitor the performance of your Elastic environment, administer your license, se * [](/deploy-manage/tools.md): Learn about the tools available to safeguard data, ensure continuous availability, and maintain resilience in your {{es}} environment. * [](/deploy-manage/monitor.md): View health and performance data for Elastic components, and receive recommendations and insights. * [](/deploy-manage/license.md): Learn how to manage your Elastic license. -* [](/deploy-manage/maintenance.md): Learn how to isolate or deactivate parts of your Elastic environment to perform maintenance, or restart parts of Elastic. 
+* [](/deploy-manage/maintenance/start-stop-services.md): Learn how to isolate or deactivate parts of your Elastic environment to perform maintenance, or restart parts of Elastic. +* [](/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md): Learn how to add nodes to a cluster or remove them from a cluster to change the size and capacity of {{es}}. ### Upgrade -You can [upgrade your Elastic environment](/deploy-manage/upgrade.md) to gain access to the latest features. Learn how to upgrade your cluster or deployment to the latest {{stack}} version, or upgrade your {{ece}} orchestrator or {{eck}} operator to the latest version. +You can [upgrade your Elastic environment](/deploy-manage/upgrade.md) to gain access to the latest features. ### Design guidance diff --git a/deploy-manage/deploy/self-managed/configure-elasticsearch.md b/deploy-manage/deploy/self-managed/configure-elasticsearch.md index 0eacc62959..8e84e1efd7 100644 --- a/deploy-manage/deploy/self-managed/configure-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/configure-elasticsearch.md @@ -131,49 +131,14 @@ Static settings can only be configured on an unstarted or shut down node using ` Static settings must be set on every relevant node in the cluster. +## Additional guides +Refer to the following documentation to learn how to perform key configuration tasks for {{es}}: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -$$$path-settings$$$ - -$$$ref-saml-settings$$$ - -$$$ref-oidc-settings$$$ - -$$$ref-kerberos-settings$$$ - -$$$hashing-settings$$$ - -$$$cluster-shard-limit$$$ \ No newline at end of file +* [](/deploy-manage/security.md): Learn about security features that prevent bad actors from tampering with your data, and encrypt communications to, from, and within your cluster. 
+* [](/deploy-manage/users-roles/cluster-or-deployment-auth.md): Set up authentication and authorization for your cluster, and learn about the underlying security technologies that {{es}} uses to authenticate and authorize requests internally and across services. +* [](/deploy-manage/api-keys.md): Authenticate and authorize programmatic access to your deployments and {{es}} resources. +* [](/deploy-manage/manage-connectors.md): Manage connection information between Elastic and third-party systems. +* [](/deploy-manage/remote-clusters.md): Enable communication between {{es}} clusters to support [cross-cluster replication](/deploy-manage/tools/cross-cluster-replication.md) and [cross-cluster search](/solutions/search/cross-cluster-search.md). +* [](/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md): Learn how to add or remove nodes to change the size and capacity of your cluster. +* [](/deploy-manage/production-guidance.md): Review tips and guidance that you can use to design a production environment that matches your workloads, policies, and deployment needs. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/configure-kibana.md b/deploy-manage/deploy/self-managed/configure-kibana.md index fa66c81310..649f608772 100644 --- a/deploy-manage/deploy/self-managed/configure-kibana.md +++ b/deploy-manage/deploy/self-managed/configure-kibana.md @@ -31,8 +31,14 @@ Environment variables can be injected into configuration using `${MY_ENV_VAR}` s For a complete list of settings that you can apply to {{kib}}, refer to [{{kib}} configuration reference](kibana:///reference/configuration-reference.md). +## Additional guides -* Link to areas to configure SSL certificates to encrypt client browsers communications (HTTPS) --> This is a bit unclear and difficult as the HTTPS endpoint configuration in Kibana appears in Elasticsearch documentation. 
-* Link to "Secure access to Kibana" elastic.co/guide/en/kibana/current/tutorial-secure-access-to-kibana.html -* Link to Use Kibana in production (with load balancers): elastic.co/guide/en/kibana/current/production.html -* Link to doc about using more than 1 Kibana instance? (not sure if it exists though) \ No newline at end of file +Refer to the following documentation to learn how to perform key configuration tasks for {{kib}}: + +* [Configure SSL certificates](/deploy-manage/security/set-up-basic-security-plus-https.md#encrypt-kibana-browser) to encrypt traffic between client browsers and {{kib}} +* [Enable authentication providers](/deploy-manage/users-roles/cluster-or-deployment-auth/kibana-authentication.md) for {{kib}} +* Use [Spaces](/deploy-manage/manage-spaces.md) to organize content in {{kib}}, and restrict access to this content to specific users +* Use [Connectors](/deploy-manage/manage-connectors.md) to manage connection information between {{es}}, {{kib}}, and third-party systems +* Present a [user access agreement](/deploy-manage/users-roles/cluster-or-deployment-auth/access-agreement.md) when logging on to {{kib}} +* Review [considerations for using {{kib}} in production](/deploy-manage/production-guidance/kibana-in-production-environments.md), including using load balancers +* [Monitor events inside and outside of {{kib}}](/deploy-manage/monitor.md) \ No newline at end of file diff --git a/raw-migrated-files/docs-content/serverless/spaces.md b/raw-migrated-files/docs-content/serverless/spaces.md deleted file mode 100644 index c02dbc4d96..0000000000 --- a/raw-migrated-files/docs-content/serverless/spaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Spaces [spaces] - -This content applies to: [![Elasticsearch](../../../images/serverless-es-badge.svg "")](../../../solutions/search.md) [![Observability](../../../images/serverless-obs-badge.svg "")](../../../solutions/observability.md) [![Security](../../../images/serverless-sec-badge.svg 
"")](../../../solutions/security/elastic-security-serverless.md) - -Spaces enable you to organize your dashboards and other saved objects into meaningful categories. Once inside a space, you see only the dashboards and saved objects that belong to that space. - -When you create and enter a new project, you’re using the default space of that project. - -You can identify the space you’re in or switch to a different space from the header. - -:::{image} ../../../images/serverless-space-breadcrumb.png -:alt: Space breadcrumb -:screenshot: -::: - -You can view and manage the spaces of a project from the **Spaces** page in **Management**. - - -## Required permissions [spaces-required-permissions] - -You must have an admin role on the project to manage its **Spaces**. - - -## Create or edit a space [spaces-create-or-edit-a-space] - -You can have up to 100 spaces in a project. - -1. Click **Create space** or select the space you want to edit. -2. Provide: - - * A meaningful name and description for the space. - * A URL identifier. The URL identifier is a short text string that becomes part of the {{kib}} URL. {{kib}} suggests a URL identifier based on the name of your space, but you can customize the identifier to your liking. You cannot change the space identifier later. - -3. Customize the avatar of the space to your liking. -4. Save the space. - -{{kib}} also has an [API](https://www.elastic.co/docs/api/doc/serverless/group/endpoint-spaces) if you prefer to create spaces programmatically. - - -## Customize access to space [spaces-customize-access-to-space] - -Customizing access to a space is available for the following project types only: [![Elasticsearch](../../../images/serverless-es-badge.svg "")](../../../solutions/search.md) [![Security](../../../images/serverless-sec-badge.svg "")](../../../solutions/security/elastic-security-serverless.md) - -As an administrator, you can define custom roles with specific access to certain spaces and features in a project. 
Refer to [Custom roles](../../../deploy-manage/users-roles/cloud-organization/user-roles.md). - - -## Delete a space [spaces-delete-a-space] - -Deleting a space permanently removes the space and all of its contents. Find the space on the *Spaces* page and click the trash icon in the Actions column. - -You can’t delete the default space, but you can customize it to your liking. - - -## Move saved objects between spaces [spaces-move-saved-objects-between-spaces] - -To move saved objects between spaces, you can [copy objects](../../../explore-analyze/find-and-organize.md#saved-objects-copy-to-other-spaces) or [export and import objects](../../../explore-analyze/find-and-organize.md#saved-objects-import-and-export). diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-deciders.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-deciders.md deleted file mode 100644 index 2cd20f2dee..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-deciders.md +++ /dev/null @@ -1,30 +0,0 @@ -# Autoscaling deciders [autoscaling-deciders] - -[Reactive storage decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) -: Estimates required storage capacity of current data set. Available for policies governing data nodes. - -[Proactive storage decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) -: Estimates required storage capacity based on current ingestion into hot nodes. Available for policies governing hot data nodes. - -[Frozen shards decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) -: Estimates required memory capacity based on the number of partially mounted shards. Available for policies governing frozen data nodes. - -[Frozen storage decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) -: Estimates required storage capacity as a percentage of the total data set of partially mounted indices. 
Available for policies governing frozen data nodes. - -[Frozen existence decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) -: Estimates a minimum require frozen memory and storage capacity when any index is in the frozen [ILM](../../../manage-data/lifecycle/index-lifecycle-management.md) phase. - -[Machine learning decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) -: Estimates required memory capacity based on machine learning jobs. Available for policies governing machine learning nodes. - -[Fixed decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) -: Responds with a fixed required capacity. This decider is intended for testing only. - - - - - - - - diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-fixed-decider.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-fixed-decider.md deleted file mode 100644 index 6b41028d9f..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-fixed-decider.md +++ /dev/null @@ -1,57 +0,0 @@ -# Fixed decider [autoscaling-fixed-decider] - -::::{warning} -This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. -:::: - - -::::{warning} -The fixed decider is intended for testing only. Do not use this decider in production. -:::: - - -The [autoscaling](../../../deploy-manage/autoscaling.md) `fixed` decider responds with a fixed required capacity. It is not enabled by default but can be enabled for any policy by explicitly configuring it. - -## Configuration settings [_configuration_settings] - -`storage` -: (Optional, [byte value](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#byte-units)) Required amount of node-level storage. Defaults to `-1` (disabled). 
- -`memory` -: (Optional, [byte value](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#byte-units)) Required amount of node-level memory. Defaults to `-1` (disabled). - -`processors` -: (Optional, float) Required number of processors. Defaults to disabled. - -`nodes` -: (Optional, integer) Number of nodes to use when calculating capacity. Defaults to `1`. - - -## {{api-examples-title}} [autoscaling-fixed-decider-examples] - -This example puts an autoscaling policy named `my_autoscaling_policy`, enabling and configuring the fixed decider. - -```console -PUT /_autoscaling/policy/my_autoscaling_policy -{ - "roles" : [ "data_hot" ], - "deciders": { - "fixed": { - "storage": "1tb", - "memory": "32gb", - "processors": 2.3, - "nodes": 8 - } - } -} -``` - -The API returns the following result: - -```console-result -{ - "acknowledged": true -} -``` - - diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-existence-decider.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-existence-decider.md deleted file mode 100644 index 6cf9bd4952..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-existence-decider.md +++ /dev/null @@ -1,6 +0,0 @@ -# Frozen existence decider [autoscaling-frozen-existence-decider] - -The [autoscaling](../../../deploy-manage/autoscaling.md) frozen existence decider (`frozen_existence`) ensures that once the first index enters the frozen ILM phase, the frozen tier is scaled into existence. - -The frozen existence decider is enabled for all policies governing frozen data nodes and has no configuration options. 
- diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-shards-decider.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-shards-decider.md deleted file mode 100644 index e54a37130e..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-shards-decider.md +++ /dev/null @@ -1,10 +0,0 @@ -# Frozen shards decider [autoscaling-frozen-shards-decider] - -The [autoscaling](../../../deploy-manage/autoscaling.md) frozen shards decider (`frozen_shards`) calculates the memory required to search the current set of partially mounted indices in the frozen tier. Based on a required memory amount per shard, it calculates the necessary memory in the frozen tier. - -## Configuration settings [autoscaling-frozen-shards-decider-settings] - -`memory_per_shard` -: (Optional, [byte value](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#byte-units)) The memory needed per shard, in bytes. Defaults to 2000 shards per 64 GB node (roughly 32 MB per shard). Notice that this is total memory, not heap, assuming that the Elasticsearch default heap sizing mechanism is used and that nodes are not bigger than 64 GB. - - diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-storage-decider.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-storage-decider.md deleted file mode 100644 index c6cdcb0bd4..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-frozen-storage-decider.md +++ /dev/null @@ -1,12 +0,0 @@ -# Frozen storage decider [autoscaling-frozen-storage-decider] - -The [autoscaling](../../../deploy-manage/autoscaling.md) frozen storage decider (`frozen_storage`) calculates the local storage required to search the current set of partially mounted indices based on a percentage of the total data set size of such indices. 
It signals that additional storage capacity is necessary when existing capacity is less than the percentage multiplied by total data set size. - -The frozen storage decider is enabled for all policies governing frozen data nodes and has no configuration options. - -## Configuration settings [autoscaling-frozen-storage-decider-settings] - -`percentage` -: (Optional, number value) Percentage of local storage relative to the data set size. Defaults to 5. - - diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-machine-learning-decider.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-machine-learning-decider.md deleted file mode 100644 index 2ea195129a..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-machine-learning-decider.md +++ /dev/null @@ -1,52 +0,0 @@ -# Machine learning decider [autoscaling-machine-learning-decider] - -The [autoscaling](../../../deploy-manage/autoscaling.md) {{ml}} decider (`ml`) calculates the memory and CPU requirements to run {{ml}} jobs and trained models. - -The {{ml}} decider is enabled for policies governing `ml` nodes. - -::::{note} -For {{ml}} jobs to open when the cluster is not appropriately scaled, set `xpack.ml.max_lazy_ml_nodes` to the largest number of possible {{ml}} nodes (refer to [Advanced machine learning settings](elasticsearch://reference/elasticsearch/configuration-reference/machine-learning-settings.md#advanced-ml-settings) for more information). In {{ech}}, this is automatically set. -:::: - - -## Configuration settings [autoscaling-machine-learning-decider-settings] - -Both `num_anomaly_jobs_in_queue` and `num_analytics_jobs_in_queue` are designed to delay a scale-up event. If the cluster is too small, these settings indicate how many jobs of each type can be unassigned from a node. Both settings are only considered for jobs that can be opened given the current scale. 
If a job is too large for any node size or if a job can’t be assigned without user intervention (for example, a user calling `_stop` against a real-time {{anomaly-job}}), the numbers are ignored for that particular job. - -`num_anomaly_jobs_in_queue` -: (Optional, integer) Specifies the number of queued {{anomaly-jobs}} to allow. Defaults to `0`. - -`num_analytics_jobs_in_queue` -: (Optional, integer) Specifies the number of queued {{dfanalytics-jobs}} to allow. Defaults to `0`. - -`down_scale_delay` -: (Optional, [time value](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#time-units)) Specifies the time to delay before scaling down. Defaults to 1 hour. If a scale down is possible for the entire time window, then a scale down is requested. If the cluster requires a scale up during the window, the window is reset. - - -## {{api-examples-title}} [autoscaling-machine-learning-decider-examples] - -This example creates an autoscaling policy named `my_autoscaling_policy` that overrides the default configuration of the {{ml}} decider. 
- -```console -PUT /_autoscaling/policy/my_autoscaling_policy -{ - "roles" : [ "ml" ], - "deciders": { - "ml": { - "num_anomaly_jobs_in_queue": 5, - "num_analytics_jobs_in_queue": 3, - "down_scale_delay": "30m" - } - } -} -``` - -The API returns the following result: - -```console-result -{ - "acknowledged": true -} -``` - - diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-proactive-storage-decider.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-proactive-storage-decider.md deleted file mode 100644 index 17b2a7c9a0..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-proactive-storage-decider.md +++ /dev/null @@ -1,39 +0,0 @@ -# Proactive storage decider [autoscaling-proactive-storage-decider] - -The [autoscaling](../../../deploy-manage/autoscaling.md) proactive storage decider (`proactive_storage`) calculates the storage required to contain the current data set plus an estimated amount of expected additional data. - -The proactive storage decider is enabled for all policies governing nodes with the `data_hot` role. - -The estimation of expected additional data is based on past indexing that occurred within the `forecast_window`. Only indexing into data streams contributes to the estimate. - -## Configuration settings [autoscaling-proactive-storage-decider-settings] - -`forecast_window` -: (Optional, [time value](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#time-units)) The window of time to use for forecasting. Defaults to 30 minutes. - - -## {{api-examples-title}} [autoscaling-proactive-storage-decider-examples] - -This example puts an autoscaling policy named `my_autoscaling_policy`, overriding the proactive decider’s `forecast_window` to be 10 minutes. 
- -```console -PUT /_autoscaling/policy/my_autoscaling_policy -{ - "roles" : [ "data_hot" ], - "deciders": { - "proactive_storage": { - "forecast_window": "10m" - } - } -} -``` - -The API returns the following result: - -```console-result -{ - "acknowledged": true -} -``` - - diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-reactive-storage-decider.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-reactive-storage-decider.md deleted file mode 100644 index 7dba43dd9f..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/autoscaling-reactive-storage-decider.md +++ /dev/null @@ -1,8 +0,0 @@ -# Reactive storage decider [autoscaling-reactive-storage-decider] - -The [autoscaling](../../../deploy-manage/autoscaling.md) reactive storage decider (`reactive_storage`) calculates the storage required to contain the current data set. It signals that additional storage capacity is necessary when existing capacity has been exceeded (reactively). - -The reactive storage decider is enabled for all policies governing data nodes and has no configuration options. - -The decider relies partially on using [data tier preference](../../../manage-data/lifecycle/data-tiers.md#data-tier-allocation) allocation rather than node attributes. In particular, scaling a data tier into existence (starting the first node in a tier) will result in starting a node in any data tier that is empty if not using allocation based on data tier preference. Using the [ILM migrate](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-migrate.md) action to migrate between tiers is the preferred way of allocating to tiers and fully supports scaling a tier into existence. 
- diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/change-passwords-native-users.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/change-passwords-native-users.md deleted file mode 100644 index 9996cbaad0..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/change-passwords-native-users.md +++ /dev/null @@ -1,25 +0,0 @@ -# Setting passwords for native and built-in users [change-passwords-native-users] - -After you implement security, you might need or want to change passwords for different users. You can use the [`elasticsearch-reset-password`](elasticsearch://reference/elasticsearch/command-line-tools/reset-password.md) tool or the [change passwords API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password) to change passwords for native users and [built-in users](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md), such as the `elastic` or `kibana_system` users. - -For example, the following command changes the password for a user with the username `user1` to an auto-generated value, and prints the new password to the terminal: - -```shell -bin/elasticsearch-reset-password -u user1 -``` - -To explicitly set a password for a user, include the `-i` parameter with the intended password. 
- -```shell -bin/elasticsearch-reset-password -u user1 -i -``` - -If you’re working in {{kib}} or don’t have command-line access, you can use the change passwords API to change a user’s password: - -```console -POST /_security/user/user1/_password -{ - "password" : "new-test-password" -} -``` - diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/snapshot-restore.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/snapshot-restore.md deleted file mode 100644 index 6c253ea998..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/snapshot-restore.md +++ /dev/null @@ -1,144 +0,0 @@ -# Snapshot and restore [snapshot-restore] - -A snapshot is a backup of a running {{es}} cluster. You can use snapshots to: - -* Regularly back up a cluster with no downtime -* Recover data after deletion or a hardware failure -* Transfer data between clusters -* Reduce your storage costs by using [searchable snapshots](../../../deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md) in the cold and frozen data tiers - - -## The snapshot workflow [snapshot-workflow] - -{{es}} stores snapshots in an off-cluster storage location called a snapshot repository. Before you can take or restore snapshots, you must [register a snapshot repository](../../../deploy-manage/tools/snapshot-and-restore/self-managed.md) on the cluster. {{es}} supports several repository types with cloud storage options, including: - -* AWS S3 -* Google Cloud Storage (GCS) -* Microsoft Azure - -After you register a snapshot repository, you can use [{{slm}} ({{slm-init}})](../../../deploy-manage/tools/snapshot-and-restore/create-snapshots.md#automate-snapshots-slm) to automatically take and manage snapshots. You can then [restore a snapshot](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md) to recover or transfer its data. 
- - -## Snapshot contents [snapshot-contents] - -By default, a snapshot of a cluster contains the cluster state, all regular data streams, and all regular indices. The cluster state includes: - -* [Persistent cluster settings](../../../deploy-manage/deploy/self-managed/configure-elasticsearch.md#cluster-setting-types) -* [Index templates](../../../manage-data/data-store/templates.md) -* [Legacy index templates](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template) -* [Ingest pipelines](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md) -* [{{ilm-init}} policies](../../../manage-data/lifecycle/index-lifecycle-management.md) -* [Stored scripts](../../../explore-analyze/scripting/modules-scripting-using.md#script-stored-scripts) -* For snapshots taken after 7.12.0, [feature states](../../../deploy-manage/tools/snapshot-and-restore.md#feature-state) - -You can also take snapshots of only specific data streams or indices in the cluster. A snapshot that includes a data stream or index automatically includes its aliases. When you restore a snapshot, you can choose whether to restore these aliases. - -Snapshots don’t contain or back up: - -* Transient cluster settings -* Registered snapshot repositories -* Node configuration files -* [Security configuration files](../../../deploy-manage/security.md) - -::::{note} -When restoring a data stream, if the target cluster does not have an index template that matches the data stream, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. -:::: - - - -### Feature states [feature-state] - -A feature state contains the indices and data streams used to store configurations, history, and other data for an Elastic feature, such as {{es}} security or {{kib}}. 
- -::::{tip} -To retrieve a list of feature states, use the [Features API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features). -:::: - - -A feature state typically includes one or more [system indices or system data streams](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#system-indices). It may also include regular indices and data streams used by the feature. For example, a feature state may include a regular index that contains the feature’s execution history. Storing this history in a regular index lets you more easily search it. - -In {{es}} 8.0 and later versions, feature states are the only way to back up and restore system indices and system data streams. - - -## How snapshots work [how-snapshots-work] - -Snapshots are automatically deduplicated to save storage space and reduce network transfer costs. To back up an index, a snapshot makes a copy of the index’s [segments](../../../manage-data/data-store/near-real-time-search.md) and stores them in the snapshot repository. Since segments are immutable, the snapshot only needs to copy any new segments created since the repository’s last snapshot. - -Each snapshot is also logically independent. When you delete a snapshot, {{es}} only deletes the segments used exclusively by that snapshot. {{es}} doesn’t delete segments used by other snapshots in the repository. - - -### Snapshots and shard allocation [snapshots-shard-allocation] - -A snapshot copies segments from an index’s primary shards. When you start a snapshot, {{es}} immediately starts copying the segments of any available primary shards. If a shard is starting or relocating, {{es}} will wait for these processes to complete before copying the shard’s segments. If one or more primary shards aren’t available, the snapshot attempt fails. 
- -Once a snapshot begins copying a shard’s segments, {{es}} won’t move the shard to another node, even if rebalancing or shard allocation settings would typically trigger reallocation. {{es}} will only move the shard after the snapshot finishes copying the shard’s data. - - -### Snapshot start and stop times [snapshot-start-stop-times] - -A snapshot doesn’t represent a cluster at a precise point in time. Instead, each snapshot includes a start and end time. The snapshot represents a view of each shard’s data at some point between these two times. - - -## Snapshot compatibility [snapshot-restore-version-compatibility] - -To restore a snapshot to a cluster, the versions for the snapshot, cluster, and any restored indices must be compatible. - - -### Snapshot version compatibility [snapshot-cluster-compatibility] - -You can’t restore a snapshot to an earlier version of {{es}}. For example, you can’t restore a snapshot taken in 7.6.0 to a cluster running 7.5.0. - -::::{note} -:name: snapshot-prerelease-build-compatibility - -This documentation is for {{es}} version 9.0.0-beta1, which is not yet released. The compatibility table above applies only to snapshots taken in a released version of {{es}}. If you’re testing a pre-release build of {{es}} then you can still restore snapshots taken in earlier released builds as permitted by this compatibility table. You can also take snapshots using your pre-release build, and restore them using the same build. However once a pre-release build of {{es}} has written to a snapshot repository you must not use the same repository with other builds of {{es}}, even if the builds have the same version. Different pre-release builds of {{es}} may use different and incompatible repository layouts. If the repository layout is incompatible with the {{es}} build in use then taking and restoring snapshots may result in errors or may appear to succeed having silently lost some data. 
You should discard your repository before using a different build. -:::: - - - -### Index compatibility [snapshot-index-compatibility] - -Any index you restore from a snapshot must also be compatible with the current cluster’s version. If you try to restore an index created in an incompatible version, the restore attempt will fail. - -| Index creation version | 6.8 | 7.0–7.1 | 7.2–7.17 | 8.0–8.2 | 8.3–8.17 | -|------------------------|-----|---------|---------|---------|---------| -| 5.0–5.6 | ✅ | ❌ | ❌ | ❌ | ✅ [1] | -| 6.0–6.7 | ✅ | ✅ | ✅ | ❌ | ✅ [1] | -| 6.8 | ✅ | ❌ | ✅ | ❌ | ✅ [1] | -| 7.0–7.1 | ❌ | ✅ | ✅ | ✅ | ✅ | -| 7.2–7.17 | ❌ | ❌ | ✅ | ✅ | ✅ | -| 8.0–8.17 | ❌ | ❌ | ❌ | ✅ | ✅ | - -[1] Supported with [archive indices](../../../deploy-manage/upgrade/deployment-or-cluster/reading-indices-from-older-elasticsearch-versions.md). - -You can’t restore an index to an earlier version of {{es}}. For example, you can’t restore an index created in 7.6.0 to a cluster running 7.5.0. - -A compatible snapshot can contain indices created in an older incompatible version. For example, a snapshot of a 8.17 cluster can contain an index created in 6.8. Restoring the 6.8 index to an 9.0 cluster fails unless you can use the [archive functionality](../../../deploy-manage/upgrade/deployment-or-cluster/reading-indices-from-older-elasticsearch-versions.md). Keep this in mind if you take a snapshot before upgrading a cluster. - -As a workaround, you can first restore the index to another cluster running the latest version of {{es}} that’s compatible with both the index and your current cluster. You can then use [reindex-from-remote](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) to rebuild the index on your current cluster. Reindex from remote is only possible if the index’s [`_source`](elasticsearch://reference/elasticsearch/mapping-reference/mapping-source-field.md) is enabled. 
- -Reindexing from remote can take significantly longer than restoring a snapshot. Before you start, test the reindex from remote process with a subset of the data to estimate your time requirements. - - -## Warnings [snapshot-restore-warnings] - - -### Other backup methods [other-backup-methods] - -**Taking a snapshot is the only reliable and supported way to back up a cluster.** You cannot back up an {{es}} cluster by making copies of the data directories of its nodes. There are no supported methods to restore any data from a filesystem-level backup. If you try to restore a cluster from such a backup, it may fail with reports of corruption or missing files or other data inconsistencies, or it may appear to have succeeded having silently lost some of your data. - -A copy of the data directories of a cluster’s nodes does not work as a backup because it is not a consistent representation of their contents at a single point in time. You cannot fix this by shutting down nodes while making the copies, nor by taking atomic filesystem-level snapshots, because {{es}} has consistency requirements that span the whole cluster. You must use the built-in snapshot functionality for cluster backups. - - -### Repository contents [snapshot-repository-contents] - -**Don’t modify anything within the repository or run processes that might interfere with its contents.** If something other than {{es}} modifies the contents of the repository then future snapshot or restore operations may fail, reporting corruption or other data inconsistencies, or may appear to succeed having silently lost some of your data. - -You may however safely [restore a repository from a backup](../../../deploy-manage/tools/snapshot-and-restore/self-managed.md#snapshots-repository-backup) as long as - -1. The repository is not registered with {{es}} while you are restoring its contents. -2. When you have finished restoring the repository its contents are exactly as they were when you took the backup. 
- -If you no longer need any of the snapshots in a repository, unregister it from {{es}} before deleting its contents from the underlying storage. - -Additionally, snapshots may contain security-sensitive information, which you may wish to [store in a dedicated repository](../../../deploy-manage/tools/snapshot-and-restore/create-snapshots.md#cluster-state-snapshots). diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/snapshots-restore-snapshot.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/snapshots-restore-snapshot.md deleted file mode 100644 index 1447b04387..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/snapshots-restore-snapshot.md +++ /dev/null @@ -1,503 +0,0 @@ -# Restore a snapshot [snapshots-restore-snapshot] - -This guide shows you how to restore a snapshot. Snapshots are a convenient way to store a copy of your data outside of a cluster. You can restore a snapshot to recover indices and data streams after deletion or a hardware failure. You can also use snapshots to transfer data between clusters. - -In this guide, you’ll learn how to: - -* Get a list of available snapshots -* Restore an index or data stream from a snapshot -* Restore a feature state -* Restore an entire cluster -* Monitor the restore operation -* Cancel an ongoing restore - -This guide also provides tips for [restoring to another cluster](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#restore-different-cluster) and [troubleshooting common restore errors](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#troubleshoot-restore). 
- - -## Prerequisites [restore-snapshot-prereqs] - -* To use {{kib}}'s **Snapshot and Restore** feature, you must have the following permissions: - - * [Cluster privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md#privileges-list-cluster): `monitor`, `manage_slm`, `cluster:admin/snapshot`, and `cluster:admin/repository` - * [Index privilege](../../../deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md#privileges-list-indices): `all` on the `monitor` index - -* You can only restore a snapshot to a running cluster with an elected [master node](../../../deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md#master-node-role). The snapshot’s repository must be [registered](../../../deploy-manage/tools/snapshot-and-restore/self-managed.md) and available to the cluster. -* The snapshot and cluster versions must be compatible. See [Snapshot compatibility](../../../deploy-manage/tools/snapshot-and-restore.md#snapshot-restore-version-compatibility). -* To restore a snapshot, the cluster’s global metadata must be writable. Ensure there aren’t any [cluster blocks](elasticsearch://reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-read-only) that prevent writes. The restore operation ignores [index blocks](elasticsearch://reference/elasticsearch/index-settings/index-block.md). -* Before you restore a data stream, ensure the cluster contains a [matching index template](../../../manage-data/data-store/data-streams/set-up-data-stream.md#create-index-template) with data stream enabled. 
To check, use {{kib}}'s [**Index Management**](../../../manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md#manage-index-templates) feature or the [get index template API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template): - - ```console - GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream - ``` - - If no such template exists, you can [create one](../../../manage-data/data-store/data-streams/set-up-data-stream.md#create-index-template) or [restore a cluster state](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#restore-entire-cluster) that contains one. Without a matching index template, a data stream can’t roll over or create backing indices. - -* If your snapshot contains data from App Search or Workplace Search, ensure you’ve restored the [Enterprise Search encryption key](https://www.elastic.co/guide/en/enterprise-search/current/encryption-keys.html) before restoring the snapshot. -:::{important} -Enterprise Search is not available in {{stack}} 9.0+. -::: - - -## Considerations [restore-snapshot-considerations] - -When restoring data from a snapshot, keep the following in mind: - -* If you restore a data stream, you also restore its backing indices. -* You can only restore an existing index if it’s [closed](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close) and the index in the snapshot has the same number of primary shards. -* You can’t restore an existing open index. This includes backing indices for a data stream. -* The restore operation automatically opens restored indices, including backing indices. -* You can restore only a specific backing index from a data stream. However, the restore operation doesn’t add the restored backing index to any existing data stream. 
- - -## Get a list of available snapshots [get-snapshot-list] - -To view a list of available snapshots in {{kib}}, go to the main menu and click **Stack Management > Snapshot and Restore**. - -You can also use the [get repository API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository) and the [get snapshot API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get) to find snapshots that are available to restore. First, use the get repository API to fetch a list of registered snapshot repositories. - -```console -GET _snapshot -``` - -Then use the get snapshot API to get a list of snapshots in a specific repository. This also returns each snapshot’s contents. - -```console -GET _snapshot/my_repository/*?verbose=false -``` - - -## Restore an index or data stream [restore-index-data-stream] - -You can restore a snapshot using {{kib}}'s **Snapshot and Restore** feature or the [restore snapshot API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore). - -By default, a restore request attempts to restore all regular indices and regular data streams in a snapshot. In most cases, you only need to restore a specific index or data stream from a snapshot. However, you can’t restore an existing open index. - -If you’re restoring data to a pre-existing cluster, use one of the following methods to avoid conflicts with existing indices and data streams: - -* [Delete and restore](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#delete-restore) -* [Rename on restore](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#rename-on-restore) - - -### Delete and restore [delete-restore] - -The simplest way to avoid conflicts is to delete an existing index or data stream before restoring it. 
To prevent the accidental re-creation of the index or data stream, we recommend you temporarily stop all indexing until the restore operation is complete. - -::::{warning} -If the [`action.destructive_requires_name`](elasticsearch://reference/elasticsearch/configuration-reference/index-management-settings.md#action-destructive-requires-name) cluster setting is `false`, don’t use the [delete index API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete) to target the `*` or `.*` wildcard pattern. If you use {{es}}'s security features, this will delete system indices required for authentication. Instead, target the `*,-.*` wildcard pattern to exclude these system indices and other index names that begin with a dot (`.`). -:::: - - -```console -# Delete an index -DELETE my-index - -# Delete a data stream -DELETE _data_stream/logs-my_app-default -``` - -In the restore request, explicitly specify any indices and data streams to restore. - -```console -POST _snapshot/my_repository/my_snapshot_2099.05.06/_restore -{ - "indices": "my-index,logs-my_app-default" -} -``` - - -### Rename on restore [rename-on-restore] - -If you want to avoid deleting existing data, you can instead rename the indices and data streams you restore. You typically use this method to compare existing data to historical data from a snapshot. For example, you can use this method to review documents after an accidental update or deletion. - -Before you start, ensure the cluster has enough capacity for both the existing and restored data. - -The following restore snapshot API request prepends `restored-` to the name of any restored index or data stream. - -```console -POST _snapshot/my_repository/my_snapshot_2099.05.06/_restore -{ - "indices": "my-index,logs-my_app-default", - "rename_pattern": "(.+)", - "rename_replacement": "restored-$1" -} -``` - -If the rename options produce two or more indices or data streams with the same name, the restore operation fails. 
- -If you rename a data stream, its backing indices are also renamed. For example, if you rename the `logs-my_app-default` data stream to `restored-logs-my_app-default`, the backing index `.ds-logs-my_app-default-2099.03.09-000005` is renamed to `.ds-restored-logs-my_app-default-2099.03.09-000005`. - -When the restore operation is complete, you can compare the original and restored data. If you no longer need an original index or data stream, you can delete it and use a [reindex](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) to rename the restored one. - -```console -# Delete the original index -DELETE my-index - -# Reindex the restored index to rename it -POST _reindex -{ - "source": { - "index": "restored-my-index" - }, - "dest": { - "index": "my-index" - } -} - -# Delete the original data stream -DELETE _data_stream/logs-my_app-default - -# Reindex the restored data stream to rename it -POST _reindex -{ - "source": { - "index": "restored-logs-my_app-default" - }, - "dest": { - "index": "logs-my_app-default", - "op_type": "create" - } -} -``` - - -## Restore a feature state [restore-feature-state] - -You can restore a [feature state](../../../deploy-manage/tools/snapshot-and-restore.md#feature-state) to recover system indices, system data streams, and other configuration data for a feature from a snapshot. - -If you restore a snapshot’s cluster state, the operation restores all feature states in the snapshot by default. Similarly, if you don’t restore a snapshot’s cluster state, the operation doesn’t restore any feature states by default. You can also choose to restore only specific feature states from a snapshot, regardless of the cluster state. - -To view a snapshot’s feature states, use the get snapshot API. - -```console -GET _snapshot/my_repository/my_snapshot_2099.05.06 -``` - -The response’s `feature_states` property contains a list of features in the snapshot as well as each feature’s indices. 
- -To restore a specific feature state from the snapshot, specify the `feature_name` from the response in the restore snapshot API’s [`feature_states`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore) parameter. - -::::{note} -When you restore a feature state, {{es}} closes and overwrites the feature’s existing indices. -:::: - - -::::{warning} -Restoring the `security` feature state overwrites system indices used for authentication. If you use {{ech}}, ensure you have access to the {{ecloud}} Console before restoring the `security` feature state. If you run {{es}} on your own hardware, [create a superuser in the file realm](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#restore-create-file-realm-user) to ensure you’ll still be able to access your cluster. -:::: - - -```console -POST _snapshot/my_repository/my_snapshot_2099.05.06/_restore -{ - "feature_states": [ "geoip" ], - "include_global_state": false, <1> - "indices": "-*" <2> -} -``` - -1. Exclude the cluster state from the restore operation. -2. Exclude the other indices and data streams in the snapshot from the restore operation. - - - -## Restore an entire cluster [restore-entire-cluster] - -In some cases, you need to restore an entire cluster from a snapshot, including the cluster state and all [feature states](../../../deploy-manage/tools/snapshot-and-restore.md#feature-state). These cases should be rare, such as in the event of a catastrophic failure. - -Restoring an entire cluster involves deleting important system indices, including those used for authentication. Consider whether you can restore specific indices or data streams instead. - -If you’re restoring to a different cluster, see [Restore to a different cluster](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#restore-different-cluster) before you start. - -1. 
If you [backed up the cluster’s configuration files](../../../deploy-manage/tools/snapshot-and-restore/create-snapshots.md#back-up-config-files), you can restore them to each node. This step is optional and requires a [full cluster restart](../../../deploy-manage/maintenance/start-stop-services/full-cluster-restart-rolling-restart-procedures.md). - - After you shut down a node, copy the backed-up configuration files over to the node’s `$ES_PATH_CONF` directory. Before restarting the node, ensure `elasticsearch.yml` contains the appropriate node roles, node name, and other node-specific settings. - - If you choose to perform this step, you must repeat this process on each node in the cluster. - -2. Temporarily stop indexing and turn off the following features: - - * GeoIP database downloader and ILM history store - - ```console - PUT _cluster/settings - { - "persistent": { - "ingest.geoip.downloader.enabled": false, - "indices.lifecycle.history_index_enabled": false - } - } - ``` - - * ILM - - ```console - POST _ilm/stop - ``` - - - * Machine Learning - - ```console - POST _ml/set_upgrade_mode?enabled=true - ``` - - - * Monitoring - - ```console - PUT _cluster/settings - { - "persistent": { - "xpack.monitoring.collection.enabled": false - } - } - ``` - - * Watcher - - ```console - POST _watcher/_stop - ``` - - - * Universal Profiling - - Check if Universal Profiling index template management is enabled: - - ```console - GET /_cluster/settings?filter_path=**.xpack.profiling.templates.enabled&include_defaults=true - ``` - - If the value is `true`, disable Universal Profiling index template management: - - ```console - PUT _cluster/settings - { - "persistent": { - "xpack.profiling.templates.enabled": false - } - } - ``` - -3. 
$$$restore-create-file-realm-user$$$If you use {{es}} security features, log in to a node host, navigate to the {{es}} installation directory, and add a user with the `superuser` role to the file realm using the [`elasticsearch-users`](elasticsearch://reference/elasticsearch/command-line-tools/users-command.md) tool. - - For example, the following command creates a user named `restore_user`. - - ```sh - ./bin/elasticsearch-users useradd restore_user -p my_password -r superuser - ``` - - Use this file realm user to authenticate requests until the restore operation is complete. - -4. Use the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) to set [`action.destructive_requires_name`](elasticsearch://reference/elasticsearch/configuration-reference/index-management-settings.md#action-destructive-requires-name) to `false`. This lets you delete data streams and indices using wildcards. - - ```console - PUT _cluster/settings - { - "persistent": { - "action.destructive_requires_name": false - } - } - ``` - -5. Delete all existing data streams on the cluster. - - ```console - DELETE _data_stream/*?expand_wildcards=all - ``` - -6. Delete all existing indices on the cluster. - - ```console - DELETE *?expand_wildcards=all - ``` - -7. Restore the entire snapshot, including the cluster state. By default, restoring the cluster state also restores any feature states in the snapshot. - - ```console - POST _snapshot/my_repository/my_snapshot_2099.05.06/_restore - { - "indices": "*", - "include_global_state": true - } - ``` - -8. When the restore operation is complete, resume indexing and restart any features you stopped: - - ::::{note} - When the snapshot is restored, the license that was in use at the time the snapshot was taken will be restored as well. 
If your license has expired since the snapshot was taken, you will need to use the [Update License API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post) to install a current license. - :::: - - - * GeoIP database downloader and ILM history store - - ```console - PUT _cluster/settings - { - "persistent": { - "ingest.geoip.downloader.enabled": true, - "indices.lifecycle.history_index_enabled": true - } - } - ``` - - * ILM - - ```console - POST _ilm/start - ``` - - * Machine Learning - - ```console - POST _ml/set_upgrade_mode?enabled=false - ``` - - * Monitoring - - ```console - PUT _cluster/settings - { - "persistent": { - "xpack.monitoring.collection.enabled": true - } - } - ``` - - * Watcher - - ```console - POST _watcher/_start - ``` - - - * Universal Profiling - - If the value was `true` initially, enable Universal Profiling index template management again, otherwise skip this step: - - ```console - PUT _cluster/settings - { - "persistent": { - "xpack.profiling.templates.enabled": true - } - } - ``` - -9. If wanted, reset the `action.destructive_requires_name` cluster setting. - - ```console - PUT _cluster/settings - { - "persistent": { - "action.destructive_requires_name": null - } - } - ``` - - - -## Monitor a restore [monitor-restore] - -The restore operation uses the [shard recovery process](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery) to restore an index’s primary shards from a snapshot. While the restore operation recovers primary shards, the cluster will have a `yellow` [health status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health). - -After all primary shards are recovered, the replication process creates and distributes replicas across eligible data nodes. When replication is complete, the cluster health status typically becomes `green`. - -Once you start a restore in {{kib}}, you’re navigated to the **Restore Status** page. 
You can use this page to track the current state for each shard in the snapshot. - -You can also monitor snapshot recover using {{es}} APIs. To monitor the cluster health status, use the [cluster health API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health). - -```console -GET _cluster/health -``` - -To get detailed information about ongoing shard recoveries, use the [index recovery API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery). - -```console -GET my-index/_recovery -``` - -To view any unassigned shards, use the [cat shards API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards). - -```console -GET _cat/shards?v=true&h=index,shard,prirep,state,node,unassigned.reason&s=state -``` - -Unassigned shards have a `state` of `UNASSIGNED`. The `prirep` value is `p` for primary shards and `r` for replicas. The `unassigned.reason` describes why the shard remains unassigned. - -To get a more in-depth explanation of an unassigned shard’s allocation status, use the [cluster allocation explanation API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain). - -```console -GET _cluster/allocation/explain -{ - "index": "my-index", - "shard": 0, - "primary": false, - "current_node": "my-node" -} -``` - - -## Cancel a restore [cancel-restore] - -You can delete an index or data stream to cancel its ongoing restore. This also deletes any existing data in the cluster for the index or data stream. Deleting an index or data stream doesn’t affect the snapshot or its data. - -```console -# Delete an index -DELETE my-index - -# Delete a data stream -DELETE _data_stream/logs-my_app-default -``` - - -## Restore to a different cluster [restore-different-cluster] - -::::{tip} -{{ech}} can help you restore snapshots from other deployments. See [Work with snapshots](../../../deploy-manage/tools/snapshot-and-restore.md). 
-:::: - - -Snapshots aren’t tied to a particular cluster or a cluster name. You can create a snapshot in one cluster and restore it in another [compatible cluster](../../../deploy-manage/tools/snapshot-and-restore.md#snapshot-restore-version-compatibility). Any data stream or index you restore from a snapshot must also be compatible with the current cluster’s version. The topology of the clusters doesn’t need to match. - -To restore a snapshot, its repository must be [registered](../../../deploy-manage/tools/snapshot-and-restore/self-managed.md) and available to the new cluster. If the original cluster still has write access to the repository, register the repository as read-only. This prevents multiple clusters from writing to the repository at the same time and corrupting the repository’s contents. It also prevents {{es}} from caching the repository’s contents, which means that changes made by other clusters will become visible straight away. - -Before you start a restore operation, ensure the new cluster has enough capacity for any data streams or indices you want to restore. If the new cluster has a smaller capacity, you can: - -* Add nodes or upgrade your hardware to increase capacity. -* Restore fewer indices and data streams. -* Reduce the [number of replicas](elasticsearch://reference/elasticsearch/index-settings/index-modules.md#dynamic-index-number-of-replicas) for restored indices. - - For example, the following restore snapshot API request uses the `index_settings` option to set `index.number_of_replicas` to `1`. 
- - ```console - POST _snapshot/my_repository/my_snapshot_2099.05.06/_restore - { - "indices": "my-index,logs-my_app-default", - "index_settings": { - "index.number_of_replicas": 1 - } - } - ``` - - -If indices or backing indices in the original cluster were assigned to particular nodes using [shard allocation filtering](../../../deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/index-level-shard-allocation.md), the same rules will be enforced in the new cluster. If the new cluster does not contain nodes with appropriate attributes that a restored index can be allocated on, the index will not be successfully restored unless these index allocation settings are changed during the restore operation. - -The restore operation also checks that restored persistent settings are compatible with the current cluster to avoid accidentally restoring incompatible settings. If you need to restore a snapshot with incompatible persistent settings, try restoring it without the [global cluster state](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore). - - -## Troubleshoot restore errors [troubleshoot-restore] - -Here’s how to resolve common errors returned by restore requests. - - -### Cannot restore index [] because an open index with same name already exists in the cluster [_cannot_restore_index_index_because_an_open_index_with_same_name_already_exists_in_the_cluster] - -You can’t restore an open index that already exists. To resolve this error, try one of the methods in [Restore an index or data stream](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#restore-index-data-stream). - - -### Cannot restore index [] with [x] shards from a snapshot of index [] with [y] shards [_cannot_restore_index_index_with_x_shards_from_a_snapshot_of_index_snapshot_index_with_y_shards] - -You can only restore an existing index if it’s closed and the index in the snapshot has the same number of primary shards. 
This error indicates the index in the snapshot has a different number of primary shards. - -To resolve this error, try one of the methods in [Restore an index or data stream](../../../deploy-manage/tools/snapshot-and-restore/restore-snapshot.md#restore-index-data-stream). diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-autoscaling.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-autoscaling.md deleted file mode 100644 index 1b0e58abae..0000000000 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-autoscaling.md +++ /dev/null @@ -1,18 +0,0 @@ -# Autoscaling [xpack-autoscaling] - -::::{note} -{cloud-only} -:::: - - -The [autoscaling](../../../deploy-manage/autoscaling.md) feature enables an operator to configure tiers of nodes that self-monitor whether or not they need to scale based on an operator-defined policy. Then, via the autoscaling API, an Elasticsearch cluster can report whether or not it needs additional resources to meet the policy. For example, an operator could define a policy that a warm tier should scale on available disk space. Elasticsearch would monitor and forecast the available disk space in the warm tier, and if the forecast is such that the cluster will soon not be able to allocate existing and future shard copies due to disk space, then the autoscaling API would report that the cluster needs to scale due to disk space. It remains the responsibility of the operator to add the additional resources that the cluster signals it requires. - -A policy is composed of a list of roles and a list of deciders. Nodes matching the roles are governed by the policy. The deciders provide independent estimates of the capacity required. See [Autoscaling deciders](../../../deploy-manage/autoscaling/autoscaling-deciders.md) for more information on the deciders available. - -Autoscaling supports the scale-up and scale-down of dedicated {{ml}} nodes. 
Autoscaling also supports the scale-up of data nodes based on storage. - -::::{note} -Autoscaling is not supported on Debian 8. -:::: - - diff --git a/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md b/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md deleted file mode 100644 index 5097f6beea..0000000000 --- a/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md +++ /dev/null @@ -1,533 +0,0 @@ -# Installing in an air-gapped environment [air-gapped-install] - -Some components of the {{stack}} require additional configuration and local dependencies in order to deploy in environments without internet access. This guide gives an overview of this setup scenario and helps bridge together existing documentation for individual parts of the stack. - -* [1. Self-Managed Install (Linux)](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-self-managed-linux) - - * [1.1. {{es}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elasticsearch) - * [1.2. {{kib}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana) - * [1.3. {{beats}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-beats) - * [1.4. {{ls}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-logstash) - * [1.5. {{agent}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-agent) - * [1.6. {{fleet-server}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-fleet) - * [1.7. Elastic APM](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-apm) - * [1.8. {{ems}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-maps-service) - * [1.9. {{package-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry) - * [1.10. 
{{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry) - * [1.11. Elastic Endpoint Artifact Repository](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-endpoint-artifact-repository) - * [1.12 {{ml-cap}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-machine-learning) - - -* [2. Kubernetes & OpenShift Install](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kubernetes-and-openshift) - - * [2.1. Elastic Kubernetes Operator (ECK)](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-k8s-os-elastic-kubernetes-operator) - * [2.2. Elastic Package Registry](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-k8s-os-elastic-package-registry) - * [2.3. {{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-k8s-os-elastic-artifact-registry) - * [2.4. Elastic Endpoint Artifact Repository](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-k8s-os-elastic-endpoint-artifact-repository) - * [2.5. 
Ironbank Secure Images for Elastic](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-k8s-os-ironbank-secure-images) - - -* [3.0 {{ece}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-ece) - -* [Appendix A - {{package-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry-example) -* [Appendix B - {{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry-example) -* [Appendix C - EPR Kubernetes Deployment](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-epr-kubernetes-example) -* [Appendix D - Agent Integration Guide](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-guide) - - * [D.1. Terminology](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-terminology) - * [D.2. How to configure](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure) - - * [D.2.1. Using the {{kib}} UI](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-kibana) - * [D.2.2. Using the `kibana.yml` config file](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-yml) - * [D.2.3. Using the {{kib}} {{fleet}} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api) - - -::::{note} -If you’re working in an air-gapped environment and have a subscription level that includes Support coverage, [contact us](https://www.elastic.co/contact) if you’d like to request an offline version of the Elastic documentation. -:::: - - - -### 1. 
Self-Managed Install (Linux) [air-gapped-self-managed-linux] - -Refer to the section for each Elastic component for air-gapped installation configuration and dependencies in a self-managed Linux environment. - - -#### 1.1. {{es}} [air-gapped-elasticsearch] - -Air-gapped install of {{es}} may require additional steps in order to access some of the features. General install and configuration guides are available in the [{{es}} install documentation](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md). - -Specifically: - -* To be able to use the GeoIP processor, refer to [the GeoIP processor documentation](elasticsearch://reference/ingestion-tools/enrich-processor/geoip-processor.md#manually-update-geoip-databases) for instructions on downloading and deploying the required databases. -* Refer to [{{ml-cap}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-machine-learning) for instructions on deploying the Elastic Learned Sparse EncodeR (ELSER) natural language processing (NLP) model and other trained {{ml}} models. - - -#### 1.2. {{kib}} [air-gapped-kibana] - -Air-gapped install of {{kib}} may require a number of additional services in the local network in order to access some of the features. General install and configuration guides are available in the [{{kib}} install documentation](../../../deploy-manage/deploy/self-managed/install-kibana.md). - -Specifically: - -* To be able to use {{kib}} mapping visualizations, you need to set up and configure the [Elastic Maps Service](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-maps-service). -* To be able to use {{kib}} sample data, install or update hundreds of prebuilt alert rules, and explore available data integrations, you need to set up and configure the [{{package-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry). 
-* To provide detection rule updates for {{endpoint-sec}} agents, you need to set up and configure the [Elastic Endpoint Artifact Repository](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-endpoint-artifact-repository). -* To access the APM integration, you need to set up and configure [Elastic APM](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-apm). -* To install and use the Elastic documentation for {{kib}} AI assistants, you need to set up and configure the [Elastic product documentation for {{kib}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana-product-documentation). - - -#### 1.3. {{beats}} [air-gapped-beats] - -Elastic {{beats}} are light-weight data shippers. They do not require any unique setup in the air-gapped scenario. To learn more, refer to the [{{beats}} documentation](asciidocalypse://docs/beats/docs/reference/index.md). - - -#### 1.4. {{ls}} [air-gapped-logstash] - -{{ls}} is a versatile data shipping and processing application. It does not require any unique setup in the air-gapped scenario. To learn more, refer to the [{{ls}} documentation](logstash://reference/index.md). - - -#### 1.5. {{agent}} [air-gapped-elastic-agent] - -Air-gapped install of {{agent}} depends on the [{{package-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry) and the [{{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry) for most use-cases. The agent itself is fairly lightweight and installs dependencies only as required by its configuration. In terms of connections to these dependencies, {{agents}} need to be able to connect to the {{artifact-registry}} directly, but {{package-registry}} connections are handled through [{{kib}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana). 
- -Additionally, if the {{agent}} {{elastic-defend}} integration is used, then access to the [Elastic Endpoint Artifact Repository](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-endpoint-artifact-repository) is necessary in order to deploy updates for some of the detection and prevention capabilities. - -To learn more about install and configuration, refer to the [{{agent}} install documentation](/reference/ingestion-tools/fleet/install-elastic-agents.md). Make sure to check the requirements specific to running {{agents}} in an [air-gapped environment](/reference/ingestion-tools/fleet/air-gapped.md). - -To get a better understanding of how to work with {{agent}} configuration settings and policies, refer to [Appendix D - Agent Integration Guide](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-guide). - - -#### 1.6. {{fleet-server}} [air-gapped-fleet] - -{{fleet-server}} is a required middleware component for any scalable deployment of the {{agent}}. The air-gapped dependencies of {{fleet-server}} are the same as those of the [{{agent}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-agent). - -To learn more about installing {{fleet-server}}, refer to the [{{fleet-server}} set up documentation](/reference/ingestion-tools/fleet/fleet-server.md). - - -#### 1.7. Elastic APM [air-gapped-elastic-apm] - -Air-gapped setup of the APM server is possible in two ways: - -* By setting up one of the {{agent}} deployments with an APM integration, as described in [Switch a self-installation to the APM integration](/solutions/observability/apps/switch-self-installation-to-apm-integration.md). -* Or, by installing a standalone Elastic APM Server, as described in the [APM configuration documentation](/solutions/observability/apps/configure-apm-server.md). - - -#### 1.8. 
{{ems}} [air-gapped-elastic-maps-service] - -Refer to [Connect to {{ems}}](../../../explore-analyze/visualize/maps/maps-connect-to-ems.md) in the {{kib}} documentation to learn how to configure your firewall to connect to {{ems}}, host it locally, or disable it completely. - - -#### 1.9. {{package-registry}} [air-gapped-elastic-package-registry] - -Air-gapped install of the EPR is possible using any OCI-compatible runtime like Podman (a typical choice for RHEL-like Linux systems) or Docker. Links to the official container image and usage guide is available on the [Air-gapped environments](/reference/ingestion-tools/fleet/air-gapped.md) page in the {{fleet}} and {{agent}} Guide. - -Refer to [Appendix A - {{package-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry-example) for additional setup examples. - -::::{note} -Besides setting up the EPR service, you also need to [configure {{kib}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana) to use this service. If using TLS with the EPR service, it is also necessary to set up {{kib}} to trust the certificate presented by the EPR. -:::: - - - -#### 1.10. {{artifact-registry}} [air-gapped-elastic-artifact-registry] - -Air-gapped install of the {{artifact-registry}} is necessary in order to enable {{agent}} deployments to perform self-upgrades and install certain components which are needed for some of the data integrations (that is, in addition to what is also retrieved from the EPR). To learn more, refer to [Host your own artifact registry for binary downloads](/reference/ingestion-tools/fleet/air-gapped.md#host-artifact-registry) in the {{fleet}} and {{elastic-agent}} Guide. - -Refer to [Appendix B - {{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry-example) for additional setup examples. 
- -::::{note} -When setting up own web server, such as NGINX, to function as the {{artifact-registry}}, it is recommended not to use TLS as there are, currently, no direct ways to establish certificate trust between {{agents}} and this service. -:::: - - - -#### 1.11. Elastic Endpoint Artifact Repository [air-gapped-elastic-endpoint-artifact-repository] - -Air-gapped setup of this component is, essentially, identical to the setup of the [{{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry) except that different artifacts are served. To learn more, refer to [Configure offline endpoints and air-gapped environments](../../../solutions/security/configure-elastic-defend/configure-offline-endpoints-air-gapped-environments.md) in the Elastic Security guide. - - -#### 1.12 {{ml-cap}} [air-gapped-machine-learning] - -Some {{ml}} features, like natural language processing (NLP), require you to deploy trained models. To learn about deploying {{ml}} models in an air-gapped environment, refer to: - -* [Deploy ELSER in an air-gapped environment](../../../explore-analyze/machine-learning/nlp/ml-nlp-elser.md#air-gapped-install). -* [Install trained models in an air-gapped environment with Eland](eland://reference/machine-learning.md#ml-nlp-pytorch-air-gapped). - - -#### 1.13 {{kib}} Product documentation for AI Assistants [air-gapped-kibana-product-documentation] - -Detailed install and configuration instructions are available in the [{{kib}} AI Assistants settings documentation](kibana://reference/configuration-reference/ai-assistant-settings.md). - - -### 2. Kubernetes & OpenShift Install [air-gapped-kubernetes-and-openshift] - -Setting up air-gapped Kubernetes or OpenShift installs of the {{stack}} has some unique concerns, but the general dependencies are the same as in the self-managed install case on a regular Linux machine. - - -#### 2.1. 
Elastic Kubernetes Operator (ECK) [air-gapped-k8s-os-elastic-kubernetes-operator] - -The Elastic Kubernetes operator is an additional component in the Kubernetes OpenShift install that, essentially, does a lot of the work in installing, configuring, and updating deployments of the {{stack}}. For details, refer to the [{{eck}} install instructions](../../../deploy-manage/deploy/cloud-on-k8s/air-gapped-install.md). - -The main requirements are: - -* Syncing container images for ECK and all other {{stack}} components over to a locally-accessible container repository. -* Modifying the ECK helm chart configuration so that ECK is aware that it is supposed to use your offline container repository instead of the public Elastic repository. -* Optionally, disabling ECK telemetry collection in the ECK helm chart. This configuration propagates to all other Elastic components, such as {{kib}}. -* Building your custom deployment container image for the {{artifact-registry}}. -* Building your custom deployment container image for the Elastic Endpoint Artifact Repository. - - -#### 2.2. Elastic Package Registry [air-gapped-k8s-os-elastic-package-registry] - -The container image can be downloaded from the official Elastic Docker repository, as described in the {{fleet}} and {{elastic-agent}} [air-gapped environments](/reference/ingestion-tools/fleet/air-gapped.md) documentation. - -This container would, ideally, run as a Kubernetes deployment. Refer to [Appendix C - EPR Kubernetes Deployment](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-epr-kubernetes-example) for examples. - - -#### 2.3. {{artifact-registry}} [air-gapped-k8s-os-elastic-artifact-registry] - -A custom container would need to be created following similar instructions to setting up a web server in the [self-managed install case](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry). 
For example, a container file using an NGINX base image could be used to run a build similar to the example described in [Appendix B - {{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry-example). - - -#### 2.4. Elastic Endpoint Artifact Repository [air-gapped-k8s-os-elastic-endpoint-artifact-repository] - -Just like the {{artifact-registry}}. A custom container needs to be created following similar instructions to setting up a web server for the [self-managed install case](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry). - - -#### 2.5. Ironbank Secure Images for Elastic [air-gapped-k8s-os-ironbank-secure-images] - -Besides the public [Elastic container repository](https://www.docker.elastic.co), most {{stack}} container images are also available in Platform One’s [Iron Bank](https://ironbank.dso.mil/repomap?vendorFilters=Elastic&page=1&sort=1). - - -#### 3.0 {{ece}} [air-gapped-ece] - -To install {{ece}} in an air-gapped environment you’ll need to host your own [1.10. {{package-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry). Refer to the [ECE offline install instructions](/deploy-manage/deploy/cloud-enterprise/air-gapped-install.md) for details. - - -### Appendix A - {{package-registry}} [air-gapped-elastic-package-registry-example] - -The following script generates a SystemD service file on a RHEL 8 system in order to run EPR with Podman in a production environment. 
- -```shell -#!/usr/bin/env bash - -EPR_BIND_ADDRESS="0.0.0.0" -EPR_BIND_PORT="8443" -EPR_TLS_CERT="/etc/elastic/epr/epr.pem" -EPR_TLS_KEY="/etc/elastic/epr/epr-key.pem" -EPR_IMAGE="docker.elastic.co/package-registry/distribution:9.0.0-beta1" - -podman create \ - --name "elastic-epr" \ - -p "$EPR_BIND_ADDRESS:$EPR_BIND_PORT:$EPR_BIND_PORT" \ - -v "$EPR_TLS_CERT:/etc/ssl/epr.crt:ro" \ - -v "$EPR_TLS_KEY:/etc/ssl/epr.key:ro" \ - -e "EPR_ADDRESS=0.0.0.0:$EPR_BIND_PORT" \ - -e "EPR_TLS_CERT=/etc/ssl/epr.crt" \ - -e "EPR_TLS_KEY=/etc/ssl/epr.key" \ - "$EPR_IMAGE" - -## creates service file in the root directory -# podman generate systemd --new --files --name elastic-epr --restart-policy always -``` - -The following is an example of an actual SystemD service file for an EPR, launched as a Podman service. - -```ini -# container-elastic-epr.service -# autogenerated by Podman 4.1.1 -# Wed Oct 19 13:12:33 UTC 2022 - -[Unit] -Description=Podman container-elastic-epr.service -Documentation=man:podman-generate-systemd(1) -Wants=network-online.target -After=network-online.target -RequiresMountsFor=%t/containers - -[Service] -Environment=PODMAN_SYSTEMD_UNIT=%n -Restart=always -TimeoutStopSec=70 -ExecStartPre=/bin/rm -f %t/%n.ctr-id -ExecStart=/usr/bin/podman run \ - --cidfile=%t/%n.ctr-id \ - --cgroups=no-conmon \ - --rm \ - --sdnotify=conmon \ - -d \ - --replace \ - --name elastic-epr \ - -p 0.0.0.0:8443:8443 \ - -v /etc/elastic/epr/epr.pem:/etc/ssl/epr.crt:ro \ - -v /etc/elastic/epr/epr-key.pem:/etc/ssl/epr.key:ro \ - -e EPR_ADDRESS=0.0.0.0:8443 \ - -e EPR_TLS_CERT=/etc/ssl/epr.crt \ - -e EPR_TLS_KEY=/etc/ssl/epr.key docker.elastic.co/package-registry/distribution:9.0.0-beta1 -ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id -ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id -Type=notify -NotifyAccess=all - -[Install] -WantedBy=default.target -``` - - -### Appendix B - {{artifact-registry}} [air-gapped-elastic-artifact-registry-example] - -The 
following example script downloads artifacts from the internet to be later served as a private Elastic Package Registry. - -```shell -#!/usr/bin/env bash -set -o nounset -o errexit -o pipefail - -STACK_VERSION=9.0.0-beta1 -ARTIFACT_DOWNLOADS_BASE_URL=https://artifacts.elastic.co/downloads - -DOWNLOAD_BASE_DIR=${DOWNLOAD_BASE_DIR:?"Make sure to set DOWNLOAD_BASE_DIR when running this script"} - -COMMON_PACKAGE_PREFIXES="apm-server/apm-server beats/auditbeat/auditbeat beats/elastic-agent/elastic-agent beats/filebeat/filebeat beats/heartbeat/heartbeat beats/metricbeat/metricbeat beats/osquerybeat/osquerybeat beats/packetbeat/packetbeat cloudbeat/cloudbeat endpoint-dev/endpoint-security fleet-server/fleet-server" - -WIN_ONLY_PACKAGE_PREFIXES="beats/winlogbeat/winlogbeat" - -RPM_PACKAGES="beats/elastic-agent/elastic-agent" -DEB_PACKAGES="beats/elastic-agent/elastic-agent" - -function download_packages() { - local url_suffix="$1" - local package_prefixes="$2" - - local _url_suffixes="$url_suffix ${url_suffix}.sha512 ${url_suffix}.asc" - local _pkg_dir="" - local _dl_url="" - - for _download_prefix in $package_prefixes; do - for _pkg_url_suffix in $_url_suffixes; do - _pkg_dir=$(dirname ${DOWNLOAD_BASE_DIR}/${_download_prefix}) - _dl_url="${ARTIFACT_DOWNLOADS_BASE_URL}/${_download_prefix}-${_pkg_url_suffix}" - (mkdir -p $_pkg_dir && cd $_pkg_dir && curl -O "$_dl_url") - done - done -} - -# and we download -for _os in linux windows; do - case "$_os" in - linux) - PKG_URL_SUFFIX="${STACK_VERSION}-${_os}-x86_64.tar.gz" - ;; - windows) - PKG_URL_SUFFIX="${STACK_VERSION}-${_os}-x86_64.zip" - ;; - *) - echo "[ERROR] Something happened" - exit 1 - ;; - esac - - download_packages "$PKG_URL_SUFFIX" "$COMMON_PACKAGE_PREFIXES" - - if [[ "$_os" = "windows" ]]; then - download_packages "$PKG_URL_SUFFIX" "$WIN_ONLY_PACKAGE_PREFIXES" - fi - - if [[ "$_os" = "linux" ]]; then - download_packages "${STACK_VERSION}-x86_64.rpm" "$RPM_PACKAGES" - download_packages "${STACK_VERSION}-amd64.deb" 
"$DEB_PACKAGES" - fi -done - - -## selinux tweaks -# semanage fcontext -a -t "httpd_sys_content_t" '/opt/elastic-packages(/.*)?' -# restorecon -Rv /opt/elastic-packages -``` - -The following is an example NGINX configuration for running a web server for the {{artifact-registry}}. - -```shell -user nginx; -worker_processes 2; - -error_log /var/log/nginx/error.log notice; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - sendfile on; - keepalive_timeout 65; - - server { - listen 9080 default_server; - server_name _; - root /opt/elastic-packages; - - location / { - - } - } - -} -``` - - -### Appendix C - EPR Kubernetes Deployment [air-gapped-epr-kubernetes-example] - -The following is a sample EPR Kubernetes deployment YAML file. 
- -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: elastic-package-registry - namespace: default - labels: - app: elastic-package-registry -spec: - replicas: 1 - selector: - matchLabels: - app: elastic-package-registry - template: - metadata: - name: elastic-package-registry - labels: - app: elastic-package-registry - spec: - containers: - - name: epr - image: docker.elastic.co/package-registry/distribution:9.0.0-beta1 - ports: - - containerPort: 8080 - name: http - livenessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 20 - periodSeconds: 30 - resources: - requests: - cpu: 125m - memory: 128Mi - limits: - cpu: 1000m - memory: 512Mi - env: - - name: EPR_ADDRESS - value: "0.0.0.0:8080" ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: elastic-package-registry - name: elastic-package-registry -spec: - ports: - - port: 80 - name: http - protocol: TCP - targetPort: http - selector: - app: elastic-package-registry -``` - - -### Appendix D - Agent Integration Guide [air-gapped-agent-integration-guide] - -When configuring any integration in {{agent}}, you need to set up integration settings within whatever policy is ultimately assigned to that agent. - - -#### D.1. Terminology [air-gapped-agent-integration-terminology] - -Note the following terms and definitions: - -Integration -: A variety of optional capabilities that can be deployed on top of the {{stack}}. Refer to [Integrations](https://www.elastic.co/integrations/) to learn more. - -Agent integration -: The integrations that require {{agent}} to run. For example, the Sample Data integration requires only {{es}} and {{kib}} and consists of dashboards, data, and related objects, but the APM integration not only has some {{es}} objects, but also needs {{agent}} to run the APM Server. 
- -Package -: A set of dependencies (such as dashboards, scripts, and others) for a given integration that, typically, needs to be retrieved from the [Elastic Package Registry](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry) before an integration can be correctly installed and configured. - -Agent policy -: A configuration for the {{agent}} that may include one or more {{agent}} integrations, and configurations for each of those integrations. - - -#### D.2. How to configure [air-gapped-agent-integration-configure] - -There are three ways to configure {{agent}} integrations: - -* [D.2.1. Using the {{kib}} UI](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-kibana) -* [D.2.2. Using the `kibana.yml` config file](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-yml) -* [D.2.3. Using the {{kib}} {{fleet}} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api) - - -#### D.2.1. Using the {{kib}} UI [air-gapped-agent-integration-configure-kibana] - -**Best option for:** Manual configuration and users who prefer using a UI over scripting. - -**Example:** [Get started with logs and metrics](../../../solutions/observability/infra-and-hosts/get-started-with-system-metrics.md) - -Agent policies and integration settings can be managed using the {{kib}} UI. For example, the following shows the configuration of logging for the System integration in an {{agent}} policy: - -:::{image} ../../../images/elastic-stack-air-gapped-configure-logging.png -:alt: Configuration of a logging integration in an agent policy -:screenshot: -::: - - -#### D.2.2. Using the `kibana.yml` config file [air-gapped-agent-integration-configure-yml] - -**Good option for:** Declarative configuration and users who need reproducible and automated deployments. 
- -**Example:** [Fleet settings in {{kib}}](kibana://reference/configuration-reference/fleet-settings.md) - -::::{note} -This documentation is still under development; there may be gaps around building custom agent policies. -:::: - - -You can have {{kib}} create {{agent}} policies on your behalf by adding appropriate configuration parameters in the `kibana.yml` settings file, these include: - -`xpack.fleet.packages` -: Takes a list of all integration package names and versions that {{kib}} should download from the {{package-registry}} (EPR). This is done because {{agents}} themselves do not directly fetch packages from the EPR. - -`xpack.fleet.agentPolicies` -: Takes a list of {{agent}} policies in the format expected by the [{{kib}} {{fleet}} HTTP API](/reference/ingestion-tools/fleet/fleet-api-docs.md). Refer to the setting in [Preconfiguration settings](kibana://reference/configuration-reference/fleet-settings.md#_preconfiguration_settings_for_advanced_use_cases) for the format. See also [D.2.3. Using the {{kib}} {{fleet}} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api). - -`xpack.fleet.registryUrl` -: Takes a URL of the {{package-registry}} that can be reached by the {{kib}} server. Enable this setting only when deploying in an air-gapped environment. - -Other settings -: You can add other, more discretionary settings for {{fleet}}, {{agents}}, & policies. Refer to [Fleet settings in {{kib}}](kibana://reference/configuration-reference/fleet-settings.md). - - -#### D.2.3. Using the {{kib}} {{fleet}} API [air-gapped-agent-integration-configure-fleet-api] - -**Best option for**: Declarative configuration and users who need reproducible and automated deployments in even the trickiest of environments. - -**Example:** See the following. 
- -It is possible to use custom scripts that call the {{kib}} {{fleet}} API to create or update policies without restarting {{kib}}, and also allowing for custom error handling and update logic. - -At this time, you can refer to the the [{{kib}} {{fleet}} HTTP API](/reference/ingestion-tools/fleet/fleet-api-docs.md) documentation, however additional resources from public code repositories should be consulted to capture the full set of configuration options available for a given integration. Specifically, many integrations have configuration options such as `inputs` and `data_streams` that are unique. - -In particular, the `*.yml.hbs` templates should be consulted to determine which `vars` are available for configuring a particular integration using the {{kib}} {{fleet}} API. - -* For most Integrations, refer to the README and `*.yml.hbs` files in the appropriate directory in the [elastic/integrations repository](https://github.com/elastic/integrations/tree/main/packages). -* For the APM integration, refer to the README and `*.yml.hbs` files in the [elastic/apm-server repository](https://github.com/elastic/apm-server/tree/main/apmpackage/apm/agent). - diff --git a/raw-migrated-files/stack-docs/elastic-stack/overview.md b/raw-migrated-files/stack-docs/elastic-stack/overview.md deleted file mode 100644 index a579bbab1e..0000000000 --- a/raw-migrated-files/stack-docs/elastic-stack/overview.md +++ /dev/null @@ -1,15 +0,0 @@ -# Overview [overview] - -The products in the [{{stack}}](https://www.elastic.co/products) are designed to be used together and releases are synchronized to simplify the installation and upgrade process. 
The full stack consists of: - -* [Beats master](asciidocalypse://docs/beats/docs/reference/index.md) -* [APM master](https://www.elastic.co/guide/en/apm/guide/current/index.html) -* [Elasticsearch master](/get-started/index.md) -* [Elasticsearch Hadoop master](elasticsearch-hadoop://reference/index.md) -* [Kibana master](/get-started/the-stack.md) -* [Logstash master](logstash://reference/index.md) - -This guide provides information about installing and upgrading when you are using more than one {{stack}} product. It specifies the recommended order of installation and the steps you need to take to prepare for a stack upgrade. - -For detailed information about breaking changes in 9.0.0-beta1 and install and upgrade instructions for specific components, see the individual product reference guides. - diff --git a/raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/get-elastic.md b/raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/get-elastic.md deleted file mode 100644 index abb814879e..0000000000 --- a/raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/get-elastic.md +++ /dev/null @@ -1,86 +0,0 @@ -# Get Elastic [get-elastic] - -{{ecloud}} enables you set up the {{stack}} and start using the Search, Observability, and Security solutions in minutes. You can deploy globally in any of the dozens of supported regions across Amazon Web Services (AWS), Google Cloud, and Microsoft Azure. - -If you prefer to go the self-managed route, you can install the Elastic Stack on your own hardware or on a public, private, or hybrid cloud. If you are operating multiple clusters, consider using Elastic Cloud Enterprise or Elastic Cloud for Kubernetes to orchestrate your deployments. - -
- - - - - -Orchestrate your deployments with -Elastic Cloud Enterprise - -Install the Elastic Stack distributions for your environment - -Deploy the Elastic Stack with -Elastic Cloud Kubernetes - -Do you want to use Kubernetes to orchestrate your deployments? - -Do you need to manage multiple deployments? -Self-managed: Install and operate the Elastic Stack on your own infrastructure - - -no - - -no - - -yes - - -yes - - -no - -Elastic Cloud Hosted - -Subscribe through the , , or marketplace for 
unified billing -AWS -GCP -Azure - -Sign up directly with Elastic and -get started for free - -Do you want to sign up through elastic.co
 or your cloud provider? - - - -elastic.co - - - -cloud provider - - -yes - -Do you want Elastic to install and manage 
your deployment? - - - - - - - - - - - - - -
- -## Where to go from here [_where_to_go_from_here] - -* [Overview of the {{stack}}](../../../get-started/the-stack.md) -* [Adding your data](/manage-data/ingest.md) -* [{{stack}} subscriptions](https://www.elastic.co/subscriptions) -* [Elastic pricing](https://www.elastic.co/pricing/) - diff --git a/raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/index.md b/raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/index.md deleted file mode 100644 index 238baca9b5..0000000000 --- a/raw-migrated-files/tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Starting with the Elasticsearch Platform and its Solutions - -Migrated files from the Starting with the Elasticsearch Platform and its Solutions book. \ No newline at end of file diff --git a/raw-migrated-files/toc.yml b/raw-migrated-files/toc.yml index 1675c508f6..6cdf4d7419 100644 --- a/raw-migrated-files/toc.yml +++ b/raw-migrated-files/toc.yml @@ -183,22 +183,12 @@ toc: - file: docs-content/serverless/security-detection-engine-overview.md - file: docs-content/serverless/security-triage-alerts-with-elastic-ai-assistant.md - file: docs-content/serverless/security-vuln-management-faq.md - - file: docs-content/serverless/spaces.md - file: docs-content/serverless/what-is-observability-serverless.md - file: elasticsearch-hadoop/elasticsearch-hadoop/index.md children: - file: elasticsearch-hadoop/elasticsearch-hadoop/doc-sections.md - file: elasticsearch/elasticsearch-reference/index.md children: - - file: elasticsearch/elasticsearch-reference/autoscaling-deciders.md - - file: elasticsearch/elasticsearch-reference/autoscaling-fixed-decider.md - - file: elasticsearch/elasticsearch-reference/autoscaling-frozen-existence-decider.md - - file: elasticsearch/elasticsearch-reference/autoscaling-frozen-shards-decider.md - - file: 
elasticsearch/elasticsearch-reference/autoscaling-frozen-storage-decider.md - - file: elasticsearch/elasticsearch-reference/autoscaling-machine-learning-decider.md - - file: elasticsearch/elasticsearch-reference/autoscaling-proactive-storage-decider.md - - file: elasticsearch/elasticsearch-reference/autoscaling-reactive-storage-decider.md - - file: elasticsearch/elasticsearch-reference/change-passwords-native-users.md - file: elasticsearch/elasticsearch-reference/configuring-stack-security.md - file: elasticsearch/elasticsearch-reference/data-management.md - file: elasticsearch/elasticsearch-reference/documents-indices.md @@ -224,12 +214,8 @@ toc: - file: elasticsearch/elasticsearch-reference/security-files.md - file: elasticsearch/elasticsearch-reference/security-limitations.md - file: elasticsearch/elasticsearch-reference/semantic-search-inference.md - - file: elasticsearch/elasticsearch-reference/setup.md - file: elasticsearch/elasticsearch-reference/shard-allocation-filtering.md - file: elasticsearch/elasticsearch-reference/shard-request-cache.md - - file: elasticsearch/elasticsearch-reference/snapshot-restore.md - - file: elasticsearch/elasticsearch-reference/snapshots-restore-snapshot.md - - file: elasticsearch/elasticsearch-reference/xpack-autoscaling.md - file: ingest-docs/fleet/index.md children: - file: ingest-docs/fleet/beats-agent-comparison.md @@ -270,14 +256,9 @@ toc: - file: security-docs/security/index.md - file: stack-docs/elastic-stack/index.md children: - - file: stack-docs/elastic-stack/air-gapped-install.md - file: stack-docs/elastic-stack/installing-stack-demo-self.md - - file: stack-docs/elastic-stack/overview.md - file: stack-docs/elastic-stack/upgrade-elastic-stack-for-elastic-cloud.md - file: stack-docs/elastic-stack/upgrading-elastic-stack-on-prem.md - file: stack-docs/elastic-stack/upgrading-elastic-stack.md - file: stack-docs/elastic-stack/upgrading-elasticsearch.md - file: stack-docs/elastic-stack/upgrading-kibana.md - - file: 
tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/index.md - children: - - file: tech-content/starting-with-the-elasticsearch-platform-and-its-solutions/get-elastic.md From 9c16fd2a3087f13db08b473cc6de344938e1541d Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 13:56:33 -0400 Subject: [PATCH 21/43] more cleanup --- raw-migrated-files/toc.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/raw-migrated-files/toc.yml b/raw-migrated-files/toc.yml index 6cdf4d7419..298ade5c7e 100644 --- a/raw-migrated-files/toc.yml +++ b/raw-migrated-files/toc.yml @@ -229,7 +229,6 @@ toc: - file: kibana/kibana/console-kibana.md - file: kibana/kibana/elasticsearch-mutual-tls.md - file: kibana/kibana/esql.md - - file: kibana/kibana/install.md - file: kibana/kibana/logging-settings.md - file: kibana/kibana/reporting-production-considerations.md - file: kibana/kibana/search-ai-assistant.md @@ -237,7 +236,6 @@ toc: - file: kibana/kibana/secure-settings.md - file: kibana/kibana/Security-production-considerations.md - file: kibana/kibana/set-time-filter.md - - file: kibana/kibana/setup.md - file: kibana/kibana/upgrade-migrations-rolling-back.md - file: kibana/kibana/upgrade.md - file: kibana/kibana/using-kibana-with-security.md From 3c0e693018d48de0e9f24579d547ad759c213478 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 14:38:33 -0400 Subject: [PATCH 22/43] next steps --- .../_snippets/install-kib-next-steps.md | 5 ++ .../_snippets/install-next-steps.md | 10 +++- .../self-managed/configure-elasticsearch.md | 2 +- .../deploy/self-managed/configure-kibana.md | 3 +- ...tall-kibana-from-archive-on-linux-macos.md | 5 ++ .../self-managed/install-kibana-on-windows.md | 5 ++ .../install-kibana-with-debian-package.md | 5 ++ .../install-kibana-with-docker.md | 23 ++++----- .../self-managed/install-kibana-with-rpm.md | 5 ++ .../deploy/self-managed/install-kibana.md | 50 ++++++------------- 10 files changed, 62 insertions(+), 51 deletions(-) 
create mode 100644 deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md diff --git a/deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md new file mode 100644 index 0000000000..8b1e806790 --- /dev/null +++ b/deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md @@ -0,0 +1,5 @@ +You now have a basic {{kib}} instance set up. Consider the following next steps: + +* Learn how to [configure {{kib}}](/deploy-manage/deploy/self-managed/configure-kibana.md). +* Learn how to [access {{kib}}](/deploy-manage/deploy/self-managed/access-kibana.md). +* Explore [key configuration topics](/deploy-manage/deploy/self-managed/configure-kibana.md#additional-guides) to learn how to secure and manage {{kib}}. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md index af594640da..38ac471011 100644 --- a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md +++ b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md @@ -1,4 +1,12 @@ You now have a test {{es}} environment set up. Before you start serious development or go into production with {{es}}, you must do some additional setup: * Learn how to [configure {{es}}](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). -* Configure [important {{es}} settings](/deploy-manage/deploy/self-managed/important-settings-configuration.md). \ No newline at end of file +* Configure [important {{es}} settings](/deploy-manage/deploy/self-managed/important-settings-configuration.md). +* Install [{{kib}}](/deploy-manage/deploy/self-managed/install-kibana.md), the user interface for all Elastic solutions. + +You can also do the following: + +* Consider installing [additional {{stack}} components](/get-started/the-stack.md). 
+* Learn how to [ingest data into {{es}}](/manage-data/index.md). +* Learn about [production considerations](/deploy-manage/production-guidance.md). +* Explore [other topics](/deploy-manage/deploy/self-managed#other-important-sections.md) to learn how to secure and manage your cluster. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/configure-elasticsearch.md b/deploy-manage/deploy/self-managed/configure-elasticsearch.md index 8e84e1efd7..a9563c97fe 100644 --- a/deploy-manage/deploy/self-managed/configure-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/configure-elasticsearch.md @@ -131,7 +131,7 @@ Static settings can only be configured on an unstarted or shut down node using ` Static settings must be set on every relevant node in the cluster. -## Additional guides +## Additional topics Refer to the following documentation to learn how to perform key configuration tasks for {{es}}: diff --git a/deploy-manage/deploy/self-managed/configure-kibana.md b/deploy-manage/deploy/self-managed/configure-kibana.md index 649f608772..9d2cba6aa0 100644 --- a/deploy-manage/deploy/self-managed/configure-kibana.md +++ b/deploy-manage/deploy/self-managed/configure-kibana.md @@ -31,12 +31,13 @@ Environment variables can be injected into configuration using `${MY_ENV_VAR}` s For a complete list of settings that you can apply to {{kib}}, refer to [{{kib}} configuration reference](kibana:///reference/configuration-reference.md). 
-## Additional guides +## Additional topics Refer to the following documentation to learn how to perform key configuration tasks for {{kib}}: * [Configure SSL certificates](/deploy-manage/security/set-up-basic-security-plus-https.md#encrypt-kibana-browser) to encrypt traffic between client browsers and {{kib}} * [Enable authentication providers](/deploy-manage/users-roles/cluster-or-deployment-auth/kibana-authentication.md) for {{kib}} +* Configure the {{kib}} [reporting feature](/deploy-manage/deploy/kibana-reporting-configuration.md) * Use [Spaces](/deploy-manage/manage-spaces.md) to organize content in {{kib}}, and restrict access to this content to specific users * Use [Connectors](/deploy-manage/manage-connectors.md) to manage connection information between {{es}}, {{kib}}, and third-party systems * Present a [user access agreement](/deploy-manage/users-roles/cluster-or-deployment-auth/access-agreement.md) when logging on to {{kib}} diff --git a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md index 8d3fdb4079..41ad7dda99 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md @@ -111,3 +111,8 @@ This is very convenient because you don’t have to create any directories to st | config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | | data | The location of the data files written to disk by {{kib}} and its plugins | `$KIBANA_HOME\data` | | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. 
| `$KIBANA_HOME\plugins` | | + +## Next steps + +:::{include} _snippets/install-kib-next-steps.md +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md index ae6fb78ae3..8f7db869bc 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md @@ -70,3 +70,8 @@ This is very convenient because you don’t have to create any directories to st | config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | | | data | `The location of the data files written to disk by {{kib}} and its plugins` | `$KIBANA_HOME\data` | | | plugins | `Plugin files location. Each plugin will be contained in a subdirectory.` | `$KIBANA_HOME\plugins` | + +## Next steps + +:::{include} _snippets/install-kib-next-steps.md +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md index 66b7cf91de..66b6efd4de 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md @@ -143,3 +143,8 @@ The Debian package places config files, logs, and the data directory in the appr | data | The location of the data files written to disk by {{kib}} and its plugins | `/var/lib/kibana` | `path.data` | | logs | Logs files location | `/var/log/kibana` | `[Logging configuration](../../monitor/logging-configuration/kibana-logging.md)` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. 
| `/usr/share/kibana/plugins` | | + +## Next steps + +:::{include} _snippets/install-kib-next-steps.md +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-docker.md b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md index 9a47e3d422..79f789e750 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md @@ -188,19 +188,13 @@ All information that you include in environment variables is visible through the Some example translations are shown here: -**Environment Variable** -: **{{kib}} Setting** +| Environment variable | {{kib}} setting | +| --- | --- | +| `SERVER_NAME` | `server.name` | +| `SERVER_BASEPATH` | `server.basePath`| +| `ELASTICSEARCH_HOSTS` | `elasticsearch.hosts` | -`SERVER_NAME` -: `server.name` - -`SERVER_BASEPATH` -: `server.basePath` - -`ELASTICSEARCH_HOSTS` -: `elasticsearch.hosts` - -In general, any setting listed in [*Configure {{kib}}*](configure.md) can be configured with this technique. +In general, any setting listed in [](configure-kibana.md) can be configured with this technique. Supplying array options can be tricky. The following example shows the syntax for providing an array to `ELASTICSEARCH_HOSTS`. @@ -240,3 +234,8 @@ These settings are defined in the default `kibana.yml`. They can be overridden w ::::{important} If replacing `kibana.yml` with a custom version, be sure to copy the defaults to the custom file if you want to retain them. If not, they will be "masked" by the new file. 
:::: + +## Next steps + +:::{include} _snippets/install-kib-next-steps.md +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md index 164ed4a949..0cdd131bac 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md @@ -134,3 +134,8 @@ The RPM places config files, logs, and the data directory in the appropriate loc | data | The location of the data files written to disk by {{kib}} and its plugins | `/var/lib/kibana` | `path.data` | | logs | Logs files location | `/var/log/kibana` | `[Logging configuration](../../monitor/logging-configuration/kibana-logging.md)` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/kibana/plugins` | | + +## Next steps + +:::{include} _snippets/install-kib-next-steps.md +::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index f9a8c95cb5..ed62b27b75 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -11,19 +11,19 @@ sub: # Install {{kib}} -This section includes information on how to setup {{kib}} and get it running, including: +{{kib}} provides the user interface for all Elastic solutions. It’s a powerful tool for visualizing and analyzing your data, and for managing and monitoring the {{stack}}. Although {{kib}} is not required to use {{es}}, it's required for most use cases. 
+This section includes information on how to set up {{kib}} and get it running, including: -* Downloading -* Installing -* Starting -* Configuring -* Upgrading +* Downloading and installing {{kib}} +* Enrolling {{kib}} with an {{es}} cluster +* Configuring {{kib}} +To quickly set up {{es}} and {{kib}} in Docker for local development or testing, jump to [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). ## Supported platforms [supported-platforms] -Packages of {{kib}} are provided for and tested against Linux, Darwin, and Windows. Since {{kib}} runs on Node.js, we include the necessary Node.js binaries for these platforms. Running {{kib}} against a separately maintained version of Node.js is not supported. +Packages of {{kib}} are provided for and tested against Linux, Darwin, and Windows. Because {{kib}} runs on Node.js, we include the necessary Node.js binaries for these platforms. Running {{kib}} against a separately maintained version of Node.js is not supported. To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} for Linux ships with a custom build of Node.js with glibc 2.17 support. For details, see [Custom builds of Node.js](kibana://extend/upgrading-nodejs.md#custom-nodejs-builds). @@ -31,35 +31,13 @@ To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} f {{kib}} is provided in the following package formats: -`tar.gz`/`zip` -: The `tar.gz` packages are provided for installation on Linux and Darwin and are the easiest choice for getting started with {{kib}}. - - The `zip` package is the only supported package for Windows. - - [Install from archive on Linux or macOS](/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md) or [Install on Windows](/deploy-manage/deploy/self-managed/install-on-windows.md) - - -`deb` -: The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. 
Debian packages may be downloaded from the Elastic website or from our Debian repository. - - [Install with Debian package](/deploy-manage/deploy/self-managed/install-with-debian-package.md) - - -`rpm` -: The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elastic website or from our RPM repository. - - [Install with RPM](/deploy-manage/deploy/self-managed/install-with-rpm.md) - - -`docker` -: Images are available for running {{kib}} as a Docker container. They may be downloaded from the Elastic Docker Registry. - - [Running {{kib}} on Docker](/deploy-manage/deploy/self-managed/install-with-docker.md) - - -::::{important} -If your {{es}} installation is protected by [{{stack-security-features}}](/deploy-manage/security.md) see [Configuring security in {{kib}}](/deploy-manage/security.md) for additional setup instructions. -:::: +| Format | Description | Instructions | +| --- | --- | --- | +| `tar.gz` | The `tar.gz` packages are provided for installation on Linux and Darwin and are the easiest choice for getting started with {{kib}}. | [Install from archive on Linux or macOS](/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md)| +| `zip` | The `zip` package is the only supported package for Windows.| [Install on Windows](/deploy-manage/deploy/self-managed/install-on-windows.md)| +| `deb` | The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elastic website or from our Debian repository. | [Install with Debian package](/deploy-manage/deploy/self-managed/install-with-debian-package.md) | +| `rpm` | The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elastic website or from our RPM repository. 
| [Install with RPM](/deploy-manage/deploy/self-managed/install-with-rpm.md) | +| `docker` | Images are available for running {{kib}} as a Docker container. They may be downloaded from the Elastic Docker Registry. | [Running {{kib}} on Docker](/deploy-manage/deploy/self-managed/install-with-docker.md) | ## {{es}} version [elasticsearch-version] From a8d5b237792ed287834ecbb24380eb0ebe6f2bd9 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 14:55:07 -0400 Subject: [PATCH 23/43] tools + apis --- .../deploy/self-managed/tools-apis.md | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/deploy-manage/deploy/self-managed/tools-apis.md b/deploy-manage/deploy/self-managed/tools-apis.md index 0c982406ac..095e875e77 100644 --- a/deploy-manage/deploy/self-managed/tools-apis.md +++ b/deploy-manage/deploy/self-managed/tools-apis.md @@ -6,8 +6,24 @@ applies_to: --- # Tools and APIs for self-managed clusters -% What needs to be done: Write from scratch +Review key resources that can be used to interact with and manage self-managed clusters. -% GitHub issue: https://github.com/elastic/docs-projects/issues/310 +## APIs -⚠️ **This page is a work in progress.** ⚠️ \ No newline at end of file +* [{{es}} API](https://www.elastic.co/docs/api/doc/elasticsearch/): The core API for interacting with a self-managed {{es}} cluster, or any cluster running {{stack}}. Configure {{es}} functionality and settings, query your data, and more. + + Refer to [REST APIs](/reference/elasticsearch/rest-apis.md) to learn about API conventions and view API usage examples. +* [{{kib}} API](https://www.elastic.co/docs/api/doc/kibana/): manage {{kib}} resources such as connectors, data views, and saved objects. + +:::{tip} +Learn how to [generate API keys for your self-managed cluster](/deploy-manage/api-keys/elasticsearch-api-keys.md). 
+::: + +## Clients + +* [{{es}} clients](/reference/elasticsearch/clients/index.md): Interact with {{es}} programmatically to integrate {{es}} into your app or website. + +## Other + +* [{{es}} command line tools](elasticsearch://reference/elasticsearch/command-line-tools.md): Utilities for configuring security and performing other tasks from the command line. +* [Plugins](elasticsearch://reference/elasticsearch/elasticsearch-plugins.md): Plugins extend core {{es}} functionality. Choose from an existing plugin, or [build your own](elasticsearch://extend/index.md). \ No newline at end of file From f4100231d3914700fac055116bcb97a62e4efeb7 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 15:04:14 -0400 Subject: [PATCH 24/43] little fix --- .../install-kibana-from-archive-on-linux-macos.md | 4 ++-- .../stack-docs/elastic-stack/installing-stack-demo-self.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md index 41ad7dda99..130c505c81 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md @@ -38,7 +38,7 @@ tar -xzf kibana-{{stack-version}}-linux-x86_64.tar.gz cd kibana-{{stack-version}}/ <2> ``` -1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana-{{stack-version}}-linux-x86_64.tar.gz: OK`. +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana--linux-x86_64.tar.gz: OK`. 2. This directory is known as `$KIBANA_HOME`. ## MacOS [install-darwin64] @@ -52,7 +52,7 @@ tar -xzf kibana-{{stack-version}}-darwin-x86_64.tar.gz cd kibana-{{stack-version}}/ <2> ``` -1. 
Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana-{{stack-version}}-darwin-x86_64.tar.gz: OK`. +1. Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output `kibana--darwin-x86_64.tar.gz: OK`. 2. This directory is known as `$KIBANA_HOME`. diff --git a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md index 0851994f88..9cc6d1f5c4 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md +++ b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md @@ -90,7 +90,7 @@ To begin, use RPM to install {{es}} on the first host. This initial {{es}} insta shasum -a 512 -c elasticsearch-9.0.0-beta1-x86_64.rpm.sha512 ``` - The command should return: `elasticsearch-{{version}}-x86_64.rpm: OK`. + The command should return: `elasticsearch--x86_64.rpm: OK`. 6. Run the {{es}} install command: @@ -406,7 +406,7 @@ As with {{es}}, you can use RPM to install {{kib}} on another host. You can find shasum -a 512 -c kibana-9.0.0-beta1-x86_64.rpm.sha512 ``` - The command should return: `kibana-{{version}}-x86_64.rpm: OK`. + The command should return: `kibana--x86_64.rpm: OK`. 5. 
Run the {{kib}} install command: From 9f0667889eea935ceb5168145a19e2f0ff5ea27b Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 15:29:00 -0400 Subject: [PATCH 25/43] fewer redirects --- deploy-manage/deploy/_snippets/installation-order.md | 2 +- deploy-manage/deploy/self-managed.md | 4 ++-- deploy-manage/deploy/self-managed/air-gapped-install.md | 2 +- .../{deploy-cluster.md => installing-elasticsearch.md} | 4 ++-- deploy-manage/toc.yml | 2 +- reference/ingestion-tools/fleet/elastic-agent-container.md | 2 +- .../fleet/running-on-kubernetes-managed-by-fleet.md | 2 +- .../ingestion-tools/fleet/running-on-kubernetes-standalone.md | 2 +- troubleshoot/elasticsearch/hotspotting.md | 2 +- 9 files changed, 11 insertions(+), 11 deletions(-) rename deploy-manage/deploy/self-managed/{deploy-cluster.md => installing-elasticsearch.md} (97%) diff --git a/deploy-manage/deploy/_snippets/installation-order.md b/deploy-manage/deploy/_snippets/installation-order.md index 5c456ea67f..4a3093953f 100644 --- a/deploy-manage/deploy/_snippets/installation-order.md +++ b/deploy-manage/deploy/_snippets/installation-order.md @@ -3,7 +3,7 @@ If you're deploying the {{stack}} in a self-managed cluster, then install the {{ * {{es}} * {{kib}} * [Logstash](logstash://reference/index.md) -* [{{agent}}](/reference/ingestion-tools/fleet.md) or [Beats](asciidocalypse://docs/beats/docs/reference/index.md) +* [{{agent}}](/reference/ingestion-tools/fleet/index.md) or [Beats](asciidocalypse://docs/beats/docs/reference/index.md) * [APM](/solutions/observability/apps/application-performance-monitoring-apm.md) * [Elasticsearch Hadoop](elasticsearch-hadoop://reference/index.md) diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 6053cd1dd4..a500b9f46e 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -40,9 +40,9 @@ This section covers the following tasks: Learn how to install and configure {{es}}. 
{{es}} is the distributed search and analytics engine, scalable data store, and vector database at the heart of all Elastic solutions. -* [](/deploy-manage/deploy/self-managed/deploy-cluster.md) +* [](/deploy-manage/deploy/self-managed/installing-elasticsearch.md) * [](/deploy-manage/deploy/self-managed/important-system-configuration.md): Prepare your environment for an {{es}} installation. - * [](/deploy-manage/deploy/self-managed/installing-elasticsearch.md): Install and run {{es}} using one of our install packages or container images. + * [](/deploy-manage/deploy/self-managed/deploy-cluster#installation-methods.md): Install and run {{es}} using one of our install packages or container images. * [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md): Quickly set up {{es}} and {{kib}} in Docker for local development or testing. * [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md): Learn how to make configuration changes to {{es}} * [](/deploy-manage/deploy/self-managed/important-settings-configuration.md): Learn about key settings required for production environments. diff --git a/deploy-manage/deploy/self-managed/air-gapped-install.md b/deploy-manage/deploy/self-managed/air-gapped-install.md index 5c05aaf65b..265e9cef66 100644 --- a/deploy-manage/deploy/self-managed/air-gapped-install.md +++ b/deploy-manage/deploy/self-managed/air-gapped-install.md @@ -17,7 +17,7 @@ Refer to the section for each Elastic component for air-gapped installation conf ## {{es}} [air-gapped-elasticsearch] -Air-gapped install of {{es}} may require additional steps in order to access some of the features. General install and configuration guides are available in the [{{es}} install documentation](/deploy-manage/deploy/self-managed/installing-elasticsearch.md). +Air-gapped install of {{es}} may require additional steps in order to access some of the features. 
General install and configuration guides are available in [](/deploy-manage/deploy/self-managed/installing-elasticsearch.md). Specifically: diff --git a/deploy-manage/deploy/self-managed/deploy-cluster.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md similarity index 97% rename from deploy-manage/deploy/self-managed/deploy-cluster.md rename to deploy-manage/deploy/self-managed/installing-elasticsearch.md index f731d3fb6f..168d6aea59 100644 --- a/deploy-manage/deploy/self-managed/deploy-cluster.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -28,8 +28,8 @@ To quickly set up {{es}} and {{kib}} in Docker for local development or testing, If you want to install and manage {{es}} yourself, you can: -* Run {{es}} using a [Linux, MacOS, or Windows install package](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-install-packages). -* Run {{es}} in a [Docker container](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-docker-images). +* Run {{es}} using a [Linux, MacOS, or Windows install package](#elasticsearch-install-packages). +* Run {{es}} in a [Docker container](#elasticsearch-docker-images). ::::{tip} To try out {{stack}} on your own machine, we recommend using Docker and running both {{es}} and {{kib}}. For more information, see [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). This setup is not suitable for production use. 
diff --git a/deploy-manage/toc.yml b/deploy-manage/toc.yml index 08a4e0d07b..e4ec4f8799 100644 --- a/deploy-manage/toc.yml +++ b/deploy-manage/toc.yml @@ -290,7 +290,7 @@ toc: - file: deploy/cloud-on-k8s/tools-apis.md - file: deploy/self-managed.md children: - - file: deploy/self-managed/deploy-cluster.md + - file: deploy/self-managed/installing-elasticsearch.md children: - file: deploy/self-managed/local-development-installation-quickstart.md - file: deploy/self-managed/important-system-configuration.md diff --git a/reference/ingestion-tools/fleet/elastic-agent-container.md b/reference/ingestion-tools/fleet/elastic-agent-container.md index 73f28fcf08..ad8b3c8e66 100644 --- a/reference/ingestion-tools/fleet/elastic-agent-container.md +++ b/reference/ingestion-tools/fleet/elastic-agent-container.md @@ -27,7 +27,7 @@ Considerations: ::: :::{tab-item} Self-managed - To install and run {{es}} and {{kib}}, see [Installing the {{stack}}](/deploy-manage/deploy/self-managed/deploy-cluster.md). + To install and run {{es}} and {{kib}}, see [Installing the {{stack}}](/deploy-manage/deploy/self-managed/installing-elasticsearch.md). ::: :::: diff --git a/reference/ingestion-tools/fleet/running-on-kubernetes-managed-by-fleet.md b/reference/ingestion-tools/fleet/running-on-kubernetes-managed-by-fleet.md index 1513650325..ecc679f688 100644 --- a/reference/ingestion-tools/fleet/running-on-kubernetes-managed-by-fleet.md +++ b/reference/ingestion-tools/fleet/running-on-kubernetes-managed-by-fleet.md @@ -19,7 +19,7 @@ mapped_pages: :::{tab-item} Self-managed - To install and run {{es}} and {{kib}}, see [Installing the {{stack}}](/deploy-manage/deploy/self-managed/deploy-cluster.md). + To install and run {{es}} and {{kib}}, see [Installing the {{stack}}](/deploy-manage/deploy/self-managed/installing-elasticsearch.md). 
::: :::: diff --git a/reference/ingestion-tools/fleet/running-on-kubernetes-standalone.md b/reference/ingestion-tools/fleet/running-on-kubernetes-standalone.md index 953f8e2124..fc31230260 100644 --- a/reference/ingestion-tools/fleet/running-on-kubernetes-standalone.md +++ b/reference/ingestion-tools/fleet/running-on-kubernetes-standalone.md @@ -17,7 +17,7 @@ mapped_pages: ::: :::{tab-item} Self-managed - To install and run {{es}} and {{kib}}, see [Installing the {{stack}}](/deploy-manage/deploy/self-managed/deploy-cluster.md). + To install and run {{es}} and {{kib}}, see [Installing the {{stack}}](/deploy-manage/deploy/self-managed/installing-elasticsearch.md). ::: :::: diff --git a/troubleshoot/elasticsearch/hotspotting.md b/troubleshoot/elasticsearch/hotspotting.md index 50789a00fe..ed57a0f1f5 100644 --- a/troubleshoot/elasticsearch/hotspotting.md +++ b/troubleshoot/elasticsearch/hotspotting.md @@ -52,7 +52,7 @@ Historically, clusters experience hot spotting mainly as an effect of hardware, Here are some common improper hardware setups which may contribute to hot spotting: * Resources are allocated non-uniformly. For example, if one hot node is given half the CPU of its peers. {{es}} expects all nodes on a [data tier](../../manage-data/lifecycle/data-tiers.md) to share the same hardware profiles or specifications. -* Resources are consumed by another service on the host, including other {{es}} nodes. Refer to our [dedicated host](../../deploy-manage/deploy/self-managed/deploy-cluster.md#dedicated-host) recommendation. +* Resources are consumed by another service on the host, including other {{es}} nodes. Refer to our [dedicated host](../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#dedicated-host) recommendation. * Resources experience different network or disk throughputs. For example, if one node’s I/O is lower than its peers. 
Refer to [Use faster hardware](../../deploy-manage/production-guidance/optimize-performance/indexing-speed.md) for more information. * A JVM that has been configured with a heap larger than 31GB. Refer to [Set the JVM heap size](elasticsearch://reference/elasticsearch/jvm-settings.md#set-jvm-heap-size) for more information. * Problematic resources uniquely report [memory swapping](../../deploy-manage/deploy/self-managed/setup-configuration-memory.md). From 9840696c0c4c6da97c99a58d921a434af50dee2f Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 15:37:15 -0400 Subject: [PATCH 26/43] fix errors --- deploy-manage/deploy/self-managed/install-kibana.md | 2 +- deploy-manage/deploy/self-managed/installing-elasticsearch.md | 4 +--- deploy-manage/security/secure-saved-objects.md | 2 +- explore-analyze/report-and-share.md | 2 +- .../report-and-share/reporting-troubleshooting-pdf.md | 2 +- raw-migrated-files/kibana/kibana/secure-reporting.md | 2 +- redirects.yml | 2 ++ 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index ed62b27b75..3cc46614a8 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -37,7 +37,7 @@ To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} f | `zip` | The `zip` package is the only supported package for Windows.| [Install on Windows](/deploy-manage/deploy/self-managed/install-on-windows.md)| | `deb` | The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elastic website or from our Debian repository. | [Install with Debian package](/deploy-manage/deploy/self-managed/install-with-debian-package.md) | | `rpm` | The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. 
RPMs may be downloaded from the Elastic website or from our RPM repository. | [Install with RPM](/deploy-manage/deploy/self-managed/install-with-rpm.md) | -| `docker` | Images are available for running {{kib}} as a Docker container. They may be downloaded from the Elastic Docker Registry. | [Running {{kib}} on Docker](/deploy-manage/deploy/self-managed/install-with-docker.md) | +| `docker` | Images are available for running {{kib}} as a Docker container. They may be downloaded from the Elastic Docker Registry. | [Running {{kib}} on Docker](/deploy-manage/deploy/self-managed/install-kibana-with-docker.md) | ## {{es}} version [elasticsearch-version] diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index 168d6aea59..df1ca6f20a 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -13,9 +13,7 @@ applies_to: # Deploy an {{es}} cluster -% Doesn't exist -% * [/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md](/raw-migrated-files/stack-docs/elastic-stack/installing-elastic-stack.md) - +$$$stack-security-certificates$$$ This section includes information on how to set up {{es}} and get it running, including: diff --git a/deploy-manage/security/secure-saved-objects.md b/deploy-manage/security/secure-saved-objects.md index cdb9461a08..e6a7a58b70 100644 --- a/deploy-manage/security/secure-saved-objects.md +++ b/deploy-manage/security/secure-saved-objects.md @@ -58,7 +58,7 @@ At some point, you might want to dispose of old encryption keys completely. Make ## Docker configuration [encryption-key-docker-configuration] -It’s also possible to configure the encryption keys using [Docker environment variables](../deploy/self-managed/install-with-docker.md#environment-variable-config). 
+It’s also possible to configure the encryption keys using [Docker environment variables](../deploy/self-managed/install-kibana-with-docker.md#environment-variable-config). Docker environment variable examples: diff --git a/explore-analyze/report-and-share.md b/explore-analyze/report-and-share.md index cfdf94c0d3..c0a1960b5a 100644 --- a/explore-analyze/report-and-share.md +++ b/explore-analyze/report-and-share.md @@ -139,7 +139,7 @@ For more information on using Elasticsearch APIs directly, see [Scroll API](http We recommend using PNG/PDF reports to export moderate amounts of data only. The feature enables a high-level export capability, but it’s not intended for bulk export. If you need to export several pages of image data, consider using multiple report jobs to export a small number of pages at a time. If the screenshot of exported dashboard contains a large number of pixels, consider splitting the large dashboard into smaller artifacts to use less memory and CPU resources. -For the most reliable configuration of PDF/PNG {{report-features}}, consider installing {{kib}} using [Docker](/deploy-manage/deploy/self-managed/install-with-docker.md) or using [Elastic Cloud](https://cloud.elastic.co). +For the most reliable configuration of PDF/PNG {{report-features}}, consider installing {{kib}} using [Docker](/deploy-manage/deploy/self-managed/install-kibana-with-docker.md) or using [Elastic Cloud](https://cloud.elastic.co). ## Create JSON files [download-as-json] diff --git a/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md b/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md index 738ae7a892..0a21fd5d1b 100644 --- a/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md +++ b/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md @@ -15,7 +15,7 @@ mapped_pages: ::::{note} We recommend using PNG/PDF reports to export moderate amounts of data only. 
The feature enables a high-level export capability, but it’s not intended for bulk export. If you need to export several pages of image data, consider using multiple report jobs to export a small number of pages at a time. If the screenshot of exported dashboard contains a large number of pixels, consider splitting the large dashboard into smaller artifacts to use less memory and CPU resources. -For the most reliable configuration of PDF/PNG {{report-features}}, consider installing {{kib}} using [Docker](../../deploy-manage/deploy/self-managed/install-with-docker.md) or using [Elastic Cloud](https://cloud.elastic.co). +For the most reliable configuration of PDF/PNG {{report-features}}, consider installing {{kib}} using [Docker](../../deploy-manage/deploy/self-managed/install-kibana-with-docker.md) or using [Elastic Cloud](https://cloud.elastic.co). :::: diff --git a/raw-migrated-files/kibana/kibana/secure-reporting.md b/raw-migrated-files/kibana/kibana/secure-reporting.md index 764f7d7bbb..6b0ac3a4ea 100644 --- a/raw-migrated-files/kibana/kibana/secure-reporting.md +++ b/raw-migrated-files/kibana/kibana/secure-reporting.md @@ -13,7 +13,7 @@ Kibana PNG/PDF Reporting uses a custom binary of headless Chromium, and support * Linux versions that are in end-of-life phase are not supported. * Linux systems with SELinux or fapolicyd are not supported. -Before upgrading Kibana in a production environment, we encourage you to test your screenshotting use cases in a pre-production environment to make sure your hosts support our latest build of Chromium. For the most reliable configuration of PDF/PNG {{report-features}}, consider installing {{kib}} using [Docker](../../../deploy-manage/deploy/self-managed/install-with-docker.md), or using [Elastic Cloud](https://cloud.elastic.co). 
+Before upgrading Kibana in a production environment, we encourage you to test your screenshotting use cases in a pre-production environment to make sure your hosts support our latest build of Chromium. For the most reliable configuration of PDF/PNG {{report-features}}, consider installing {{kib}} using [Docker](../../../deploy-manage/deploy/self-managed/install-kibana-with-docker.md), or using [Elastic Cloud](https://cloud.elastic.co). :::: diff --git a/redirects.yml b/redirects.yml index 9f65b82505..837c181fd5 100644 --- a/redirects.yml +++ b/redirects.yml @@ -18,6 +18,8 @@ redirects: anchors: 'spaces-control-feature-visibility': 'deploy-manage/deploy/cloud-enterprise/deploy-large-installation-cloud.md': '!deploy-manage/deploy/cloud-enterprise/deploy-large-installation.md' + 'deploy-manage/deploy/self-managed/install-with-docker.md': '!deploy-manage/deploy/self-managed/install-kibana-with-docker.md' + 'deploy-manage/deploy/self-managed/deploy-cluster.md': '!deploy-manage/deploy/self-managed/installing-elasticsearch.md' ## explore-analyze 'explore-analyze/machine-learning/nlp/ml-nlp-auto-scale.md': '!deploy-manage/autoscaling/trained-model-autoscaling.md' From cecb26a30bc59614ebd501e952edcef9177ed2d5 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 15:44:17 -0400 Subject: [PATCH 27/43] use snippet --- ...cal-development-installation-quickstart.md | 58 +------------------ 1 file changed, 2 insertions(+), 56 deletions(-) diff --git a/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md b/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md index 8b56c3a546..1b718964fa 100644 --- a/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md +++ b/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md @@ -7,59 +7,5 @@ applies_to: self: --- - - -# Local development installation (quickstart) [run-elasticsearch-locally] - - -::::{warning} -**DO NOT USE THESE 
INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS** - -The instructions on this page are for **local development only**. Do not use this configuration for production deployments, because it is not secure. Refer to [deployment options](../../../get-started/deployment-options.md) for a list of production deployment options. - -:::: - - -Quickly set up {{es}} and {{kib}} in Docker for local development or testing, using the [`start-local` script](https://github.com/elastic/start-local?tab=readme-ov-file#-try-elasticsearch-and-kibana-locally). - -This setup comes with a one-month trial license that includes all Elastic features. After the trial period, the license reverts to **Free and open - Basic**. Refer to [Elastic subscriptions](https://www.elastic.co/subscriptions) for more information. - - -## Prerequisites [local-dev-prerequisites] - -* If you don’t have Docker installed, [download and install Docker Desktop](https://www.docker.com/products/docker-desktop) for your operating system. -* If you’re using Microsoft Windows, then install [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install). - - -## Run `start-local` [local-dev-quick-start] - -To set up {{es}} and {{kib}} locally, run the `start-local` script: - -```sh -curl -fsSL https://elastic.co/start-local | sh -``` - -This script creates an `elastic-start-local` folder containing configuration files and starts both {{es}} and {{kib}} using Docker. - -After running the script, you can access Elastic services at the following endpoints: - -* **{{es}}**: [http://localhost:9200](http://localhost:9200) -* **{{kib}}**: [http://localhost:5601](http://localhost:5601) - -The script generates a random password for the `elastic` user, and an API key, stored in the `.env` file. - -::::{warning} -This setup is for local testing only. HTTPS is disabled, and Basic authentication is used for {{es}}. For security, {{es}} and {{kib}} are accessible only through `localhost`. 
- -:::: - - - -## Learn more [local-dev-additional-info] - -For more detailed information about the `start-local` setup, refer to the [README on GitHub](https://github.com/elastic/start-local). Learn about customizing the setup, logging, and more. - - -## Next steps [local-dev-next-steps] - -Use our [quick start guides](https://www.elastic.co/guide/en/elasticsearch/reference/current/quickstart.html) to learn the basics of {{es}}. +:::{include} /deploy-manage/deploy/self-managed/_snippets/start-local.md +::: From 36d2f93c16e85dfb6944aca1161b7e76dcb6a334 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 16:06:29 -0400 Subject: [PATCH 28/43] fix links" git push --- ...cal-development-installation-quickstart.md | 2 ++ .../alerts-cases/alerts/alerting-setup.md | 2 +- .../alerts/rule-action-variables.md | 2 +- .../alerts-cases/cases/manage-cases.md | 4 +-- .../alerts-cases/cases/setup-cases.md | 2 +- .../find-and-organize/saved-objects.md | 4 +-- .../reporting-troubleshooting.md | 2 +- explore-analyze/visualize/manage-panels.md | 4 +-- .../visualize/maps/maps-connect-to-ems.md | 26 +++++++++---------- .../visualize/maps/maps-troubleshooting.md | 2 +- explore-analyze/visualize/maps/tile-layer.md | 2 +- .../Security-production-considerations.md | 2 +- .../installing-stack-demo-self.md | 2 +- raw-migrated-files/toc.yml | 1 - redirects.yml | 1 + .../apps/fleet-managed-apm-server.md | 2 +- .../configure-access-to-cases.md | 2 +- .../logs/configure-data-sources.md | 2 +- .../detections-requirements.md | 4 +-- .../view-detection-alert-details.md | 2 +- .../investigate/cases-requirements.md | 2 +- .../security/investigate/open-manage-cases.md | 2 +- .../elasticsearch/mapping-explosion.md | 2 +- troubleshoot/kibana/access.md | 2 +- troubleshoot/kibana/maps.md | 2 +- troubleshoot/kibana/reporting.md | 2 +- 26 files changed, 42 insertions(+), 40 deletions(-) diff --git a/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md 
b/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md index 1b718964fa..48e447d59c 100644 --- a/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md +++ b/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md @@ -7,5 +7,7 @@ applies_to: self: --- +# Local development installation (quickstart) [run-elasticsearch-locally] + :::{include} /deploy-manage/deploy/self-managed/_snippets/start-local.md ::: diff --git a/explore-analyze/alerts-cases/alerts/alerting-setup.md b/explore-analyze/alerts-cases/alerts/alerting-setup.md index bf378a6435..a58f326a89 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-setup.md +++ b/explore-analyze/alerts-cases/alerts/alerting-setup.md @@ -16,7 +16,7 @@ mapped_pages: If you are using an **on-premises** {{stack}} deployment: * In the `kibana.yml` configuration file, add the [`xpack.encryptedSavedObjects.encryptionKey`](kibana://reference/configuration-reference/alerting-settings.md#general-alert-action-settings) setting. -* For emails to have a footer with a link back to {{kib}}, set the [`server.publicBaseUrl`](../../../deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) configuration setting. +* For emails to have a footer with a link back to {{kib}}, set the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) configuration setting. If you are using an **on-premises** {{stack}} deployment with [**security**](../../../deploy-manage/security.md): diff --git a/explore-analyze/alerts-cases/alerts/rule-action-variables.md b/explore-analyze/alerts-cases/alerts/rule-action-variables.md index c01d7adfc0..88471266cf 100644 --- a/explore-analyze/alerts-cases/alerts/rule-action-variables.md +++ b/explore-analyze/alerts-cases/alerts/rule-action-variables.md @@ -34,7 +34,7 @@ All rule types pass the following variables: : The date the rule scheduled the action, in ISO format. 
`kibanaBaseUrl` -: The configured [`server.publicBaseUrl`](../../../deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl). If not configured, this will be empty. +: The configured [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl). If not configured, this will be empty. `rule.id` : The rule identifier. diff --git a/explore-analyze/alerts-cases/cases/manage-cases.md b/explore-analyze/alerts-cases/cases/manage-cases.md index dda00f0138..b6b8eca3fd 100644 --- a/explore-analyze/alerts-cases/cases/manage-cases.md +++ b/explore-analyze/alerts-cases/cases/manage-cases.md @@ -68,7 +68,7 @@ For self-managed {{kib}}: hasAuth: true/false ``` -3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](../../../deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) setting. +3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. When you subsequently add assignees to cases, they receive an email. @@ -81,7 +81,7 @@ After you create a case, you can upload and manage files on the **Files** tab: :screenshot: ::: -The acceptable file types and sizes are affected by your [case settings](../../../deploy-manage/deploy/self-managed/configure.md). +The acceptable file types and sizes are affected by your [case settings](kibana://reference/configuration-reference/general-settings.md). To download or delete the file or copy the file hash to your clipboard, open the action menu (…). The available hash functions are MD5, SHA-1, and SHA-256. 
diff --git a/explore-analyze/alerts-cases/cases/setup-cases.md b/explore-analyze/alerts-cases/cases/setup-cases.md index 6b58a51626..7fff510b73 100644 --- a/explore-analyze/alerts-cases/cases/setup-cases.md +++ b/explore-analyze/alerts-cases/cases/setup-cases.md @@ -58,5 +58,5 @@ You can customize sub-feature privileges for deleting cases and comments, editin For more details, refer to [{{kib}} privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kibana-privileges.md). ::::{note} -If you are using an on-premises {{kib}} deployment and you want the email notifications and the external incident management systems to contain links back to {{kib}}, you must configure the [`server.publicBaseUrl`](../../../deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) setting. +If you are using an on-premises {{kib}} deployment and you want the email notifications and the external incident management systems to contain links back to {{kib}}, you must configure the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. :::: diff --git a/explore-analyze/find-and-organize/saved-objects.md b/explore-analyze/find-and-organize/saved-objects.md index 41db53c3d5..1251477516 100644 --- a/explore-analyze/find-and-organize/saved-objects.md +++ b/explore-analyze/find-and-organize/saved-objects.md @@ -80,7 +80,7 @@ Import multiple objects in a single operation. 4. Click **Import**. ::::{note} -The [`savedObjects.maxImportExportSize`](/deploy-manage/deploy/self-managed/configure.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](/deploy-manage/deploy/self-managed/configure.md#savedObjects-maxImportPayloadBytes) setting limits the overall size of the file that you can import. 
+The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportPayloadBytes) setting limits the overall size of the file that you can import. :::: @@ -94,7 +94,7 @@ Export objects by selection or type. {{kib}} creates an NDJSON with all your saved objects. By default, the NDJSON includes child objects related to the saved objects. Exported dashboards include their associated {{data-sources}}. ::::{note} -The [`savedObjects.maxImportExportSize`](/deploy-manage/deploy/self-managed/configure.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects that you can export. +The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects that you can export. :::: diff --git a/explore-analyze/report-and-share/reporting-troubleshooting.md b/explore-analyze/report-and-share/reporting-troubleshooting.md index 723857cca1..09593c81fd 100644 --- a/explore-analyze/report-and-share/reporting-troubleshooting.md +++ b/explore-analyze/report-and-share/reporting-troubleshooting.md @@ -62,7 +62,7 @@ Create a Markdown visualization and then create a PDF report. If this succeeds, logging.root.level: all ``` -For more information about logging, check out [Kibana configuration settings](../../deploy-manage/deploy/self-managed/configure.md#logging-root-level). +For more information about logging, check out [Kibana configuration settings](kibana://reference/configuration-reference/general-settings.md#logging-root-level). 
diff --git a/explore-analyze/visualize/manage-panels.md b/explore-analyze/visualize/manage-panels.md index eacca46154..2372106bec 100644 --- a/explore-analyze/visualize/manage-panels.md +++ b/explore-analyze/visualize/manage-panels.md @@ -72,13 +72,13 @@ There are three types of **Discover** interactions you can add to dashboard pane * **Panel interactions** — Opens panel data in **Discover**, including the dashboard-level filters, but not the panel-level filters. - To enable panel interactions, configure [`xpack.discoverEnhanced.actions.exploreDataInContextMenu.enabled`](../../deploy-manage/deploy/self-managed/configure.md#settings-explore-data-in-context) in kibana.yml. If you are using 7.13.0 and earlier, panel interactions are enabled by default. + To enable panel interactions, configure [`xpack.discoverEnhanced.actions.exploreDataInContextMenu.enabled`](kibana://reference/configuration-reference/general-settings.md#settings-explore-data-in-context) in kibana.yml. If you are using 7.13.0 and earlier, panel interactions are enabled by default. To use panel interactions, open the panel menu and click **Explore underlying data**. * **Series data interactions** — Opens the series data in **Discover**. - To enable series data interactions, configure [`xpack.discoverEnhanced.actions.exploreDataInChart.enabled`](../../deploy-manage/deploy/self-managed/configure.md#settings-explore-data-in-chart) in kibana.yml. If you are using 7.13.0 and earlier, data series interactions are enabled by default. + To enable series data interactions, configure [`xpack.discoverEnhanced.actions.exploreDataInChart.enabled`](kibana://reference/configuration-reference/general-settings.md#settings-explore-data-in-chart) in kibana.yml. If you are using 7.13.0 and earlier, data series interactions are enabled by default. To use series data interactions, click a data series in the panel. 
diff --git a/explore-analyze/visualize/maps/maps-connect-to-ems.md b/explore-analyze/visualize/maps/maps-connect-to-ems.md index 45c111802c..b17f225b03 100644 --- a/explore-analyze/visualize/maps/maps-connect-to-ems.md +++ b/explore-analyze/visualize/maps/maps-connect-to-ems.md @@ -465,7 +465,7 @@ X-Firefox-Spdy: h2 You might experience EMS connection issues if your Kibana server or browser are on a private network or behind a firewall. If this happens, you can disable the EMS connection to avoid unnecessary EMS requests. -To disable EMS, change your [kibana.yml](../../../deploy-manage/deploy/self-managed/configure.md) file. +To disable EMS, change your [kibana.yml](kibana://reference/configuration-reference/general-settings.md) file. 1. Set `map.includeElasticMapsService` to `false` to turn off the EMS connection. 2. Set `map.tilemap.url` to the URL of your tile server. This configures the default tile layer of Maps. @@ -534,9 +534,9 @@ If you cannot connect to Elastic Maps Service from the {{kib}} server or browser | | | | --- | --- | -| $$$ems-host$$$`host` | Specifies the host of the backend server. To allow remote users to connect, set the value to the IP address or DNS name of the {{hosted-ems}} container. **Default: *your-hostname***. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-host). | -| `port` | Specifies the port used by the backend server. Default: **`8080`**. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-port). | -| `basePath` | Specify a path at which to mount the server if you are running behind a proxy. This setting cannot end in a slash (`/`). [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-basePath). | +| $$$ems-host$$$`host` | Specifies the host of the backend server. To allow remote users to connect, set the value to the IP address or DNS name of the {{hosted-ems}} container. **Default: *your-hostname***. 
[Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-host). | +| `port` | Specifies the port used by the backend server. Default: **`8080`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-port). | +| `basePath` | Specify a path at which to mount the server if you are running behind a proxy. This setting cannot end in a slash (`/`). [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-basePath). | | `ui` | Controls the display of the status page and the layer preview. **Default: `true`** | | `logging.level` | Verbosity of {{hosted-ems}} logs. Valid values are `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `silent`. **Default: `info`** | | `path.planet` | Path of the basemaps database. **Default: `/usr/src/app/data/planet.mbtiles`** | @@ -547,19 +547,19 @@ If you cannot connect to Elastic Maps Service from the {{kib}} server or browser | --- | --- | | `elasticsearch.host` | URL of the {{es}} instance to use for license validation. | | `elasticsearch.username` and `elasticsearch.password` | Credentials of a user with at least the `monitor` role. | -| `elasticsearch.ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. This chain is used by {{hosted-ems}} to establish trust when connecting to your {{es}} cluster. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#elasticsearch-ssl-certificateAuthorities). | -| `elasticsearch.ssl.certificate` and `elasticsearch.ssl.key`, and `elasticsearch.ssl.keyPassphrase` | Optional settings that provide the paths to the PEM-format SSL certificate and key files and the key password. 
These files are used to verify the identity of {{hosted-ems}} to {{es}} and are required when `xpack.security.http.ssl.client_authentication` in {{es}} is set to `required`. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#elasticsearch-ssl-cert-key). | -| `elasticsearch.ssl.verificationMode` | Controls the verification of the server certificate that {{hosted-ems}} receives when making an outbound SSL/TLS connection to {{es}}. Valid values are "`full`", "`certificate`", and "`none`". Using "`full`" performs hostname verification, using "`certificate`" skips hostname verification, and using "`none`" skips verification entirely. **Default: `full`**. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#elasticsearch-ssl-verificationMode). | +| `elasticsearch.ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. This chain is used by {{hosted-ems}} to establish trust when connecting to your {{es}} cluster. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-certificateAuthorities). | +| `elasticsearch.ssl.certificate` and `elasticsearch.ssl.key`, and `elasticsearch.ssl.keyPassphrase` | Optional settings that provide the paths to the PEM-format SSL certificate and key files and the key password. These files are used to verify the identity of {{hosted-ems}} to {{es}} and are required when `xpack.security.http.ssl.client_authentication` in {{es}} is set to `required`. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-cert-key). | +| `elasticsearch.ssl.verificationMode` | Controls the verification of the server certificate that {{hosted-ems}} receives when making an outbound SSL/TLS connection to {{es}}. Valid values are "`full`", "`certificate`", and "`none`". 
Using "`full`" performs hostname verification, using "`certificate`" skips hostname verification, and using "`none`" skips verification entirely. **Default: `full`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-verificationMode). | **Server security settings** | | | | --- | --- | -| `ssl.enabled` | Enables SSL/TLS for inbound connections to {{hosted-ems}}. When set to `true`, a certificate and its corresponding private key must be provided. **Default: `false`**. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-ssl-enabled). | -| `ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. This chain is used by the {{hosted-ems}} to establish trust when receiving inbound SSL/TLS connections from end users. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-ssl-certificateAuthorities). | -| `ssl.key`, `ssl.certificate`, and `ssl.keyPassphrase` | Location of yor SSL key and certificate files and the password that decrypts the private key that is specified via `ssl.key`. This password is optional, as the key may not be encrypted. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-ssl-cert-key). | -| `ssl.supportedProtocols` | An array of supported protocols with versions.Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2`. **Default: `TLSv1.1`, `TLSv1.2`**. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-ssl-supportedProtocols). 
| -| `ssl.cipherSuites` | Details on the format, and the valid options, are available via the[OpenSSL cipher list format documentation](https://www.openssl.org/docs/man1.1.1/man1/ciphers.html#CIPHER-LIST-FORMAT).**Default: `TLS_AES_256_GCM_SHA384 TLS_CHACHA20_POLY1305_SHA256 TLS_AES_128_GCM_SHA256 ECDHE-RSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-GCM-SHA384, DHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-SHA384, DHE-RSA-AES256-SHA384, ECDHE-RSA-AES256-SHA256, DHE-RSA-AES256-SHA256, HIGH,!aNULL, !eNULL, !EXPORT, !DES, !RC4, !MD5, !PSK, !SRP, !CAMELLIA`**. [Equivalent {{kib}} setting](../../../deploy-manage/deploy/self-managed/configure.md#server-ssl-cipherSuites). | +| `ssl.enabled` | Enables SSL/TLS for inbound connections to {{hosted-ems}}. When set to `true`, a certificate and its corresponding private key must be provided. **Default: `false`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-enabled). | +| `ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. This chain is used by the {{hosted-ems}} to establish trust when receiving inbound SSL/TLS connections from end users. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-certificateAuthorities). | +| `ssl.key`, `ssl.certificate`, and `ssl.keyPassphrase` | Location of your SSL key and certificate files and the password that decrypts the private key that is specified via `ssl.key`. This password is optional, as the key may not be encrypted. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-cert-key). | +| `ssl.supportedProtocols` | An array of supported protocols with versions. Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2`. 
**Default: `TLSv1.1`, `TLSv1.2`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-supportedProtocols). | +| `ssl.cipherSuites` | Details on the format, and the valid options, are available via the [OpenSSL cipher list format documentation](https://www.openssl.org/docs/man1.1.1/man1/ciphers.html#CIPHER-LIST-FORMAT). **Default: `TLS_AES_256_GCM_SHA384 TLS_CHACHA20_POLY1305_SHA256 TLS_AES_128_GCM_SHA256 ECDHE-RSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-GCM-SHA384, DHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-SHA384, DHE-RSA-AES256-SHA384, ECDHE-RSA-AES256-SHA256, DHE-RSA-AES256-SHA256, HIGH,!aNULL, !eNULL, !EXPORT, !DES, !RC4, !MD5, !PSK, !SRP, !CAMELLIA`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-cipherSuites). |
For example, `map.emsUrl: https://my-ems-server:8080`. ### Status check [elastic-maps-server-check] diff --git a/explore-analyze/visualize/maps/maps-troubleshooting.md b/explore-analyze/visualize/maps/maps-troubleshooting.md index 9c88eb0259..58a5f5484b 100644 --- a/explore-analyze/visualize/maps/maps-troubleshooting.md +++ b/explore-analyze/visualize/maps/maps-troubleshooting.md @@ -47,7 +47,7 @@ Maps uses the [{{es}} vector tile search API](https://www.elastic.co/docs/api/do * Ensure your geospatial field is searchable and aggregatable. * If your geospatial field type does not match your Elasticsearch mapping, click the **Refresh** button to refresh the field list from Elasticsearch. -* Data views with thousands of fields can exceed the default maximum payload size. Increase [`server.maxPayload`](../../../deploy-manage/deploy/self-managed/configure.md) for large data views. +* Data views with thousands of fields can exceed the default maximum payload size. Increase [`server.maxPayload`](kibana://reference/configuration-reference/general-settings.md) for large data views. ### Features are not displayed [_features_are_not_displayed] diff --git a/explore-analyze/visualize/maps/tile-layer.md b/explore-analyze/visualize/maps/tile-layer.md index 2bfb11b42d..24b54dc329 100644 --- a/explore-analyze/visualize/maps/tile-layer.md +++ b/explore-analyze/visualize/maps/tile-layer.md @@ -18,7 +18,7 @@ Tile layers display image tiles served from a tile server. To add a tile layer to your map, click **Add layer**, then select one of the following: **Configured Tile Map Service** -: Tile map service configured in kibana.yml. See map.tilemap.url in [*Configure {{kib}}*](../../../deploy-manage/deploy/self-managed/configure.md) for details. +: Tile map service configured in kibana.yml. See map.tilemap.url in [*Configure {{kib}}*](kibana://reference/configuration-reference/general-settings.md) for details. 
**EMS Basemaps** : Tile map service from [Elastic Maps Service](https://www.elastic.co/elastic-maps-service). diff --git a/raw-migrated-files/kibana/kibana/Security-production-considerations.md b/raw-migrated-files/kibana/kibana/Security-production-considerations.md index def16107eb..1ec8ed02e6 100644 --- a/raw-migrated-files/kibana/kibana/Security-production-considerations.md +++ b/raw-migrated-files/kibana/kibana/Security-production-considerations.md @@ -32,7 +32,7 @@ The {{kib}} server can instruct browsers to enable additional security controls 1. Enable HTTP Strict-Transport-Security. - Use [`strictTransportSecurity`](../../../deploy-manage/deploy/self-managed/configure.md#server-securityResponseHeaders-strictTransportSecurity) to ensure that browsers will only attempt to access {{kib}} with SSL/TLS encryption. This is designed to prevent manipulator-in-the-middle attacks. To configure this with a lifetime of one year in your `kibana.yml`: + Use [`strictTransportSecurity`](kibana://reference/configuration-reference/general-settings.md#server-securityResponseHeaders-strictTransportSecurity) to ensure that browsers will only attempt to access {{kib}} with SSL/TLS encryption. This is designed to prevent manipulator-in-the-middle attacks. To configure this with a lifetime of one year in your `kibana.yml`: ```js server.securityResponseHeaders.strictTransportSecurity: "max-age=31536000" diff --git a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md index 9cc6d1f5c4..6b92dfd787 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md +++ b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md @@ -432,7 +432,7 @@ As with {{es}}, you can use RPM to install {{kib}} on another host. You can find sudo systemctl enable kibana.service ``` -10. 
Before starting the {{kib}} service there’s one configuration change to make, to set {{kib}} to run on the {{es}} host IP address. This is done by updating the settings in the `kibana.yml` file. For details about all available settings refer to [Configure {{kib}}](../../../deploy-manage/deploy/self-managed/configure.md). +10. Before starting the {{kib}} service there’s one configuration change to make, to set {{kib}} to run on the {{es}} host IP address. This is done by updating the settings in the `kibana.yml` file. For details about all available settings refer to [Configure {{kib}}](kibana://reference/configuration-reference/general-settings.md). 11. In a terminal, run the `ifconfig` command and copy the value for the host inet IP address. 12. Open the {{kib}} configuration file for editing: diff --git a/raw-migrated-files/toc.yml b/raw-migrated-files/toc.yml index 298ade5c7e..d2b4072e39 100644 --- a/raw-migrated-files/toc.yml +++ b/raw-migrated-files/toc.yml @@ -189,7 +189,6 @@ toc: - file: elasticsearch-hadoop/elasticsearch-hadoop/doc-sections.md - file: elasticsearch/elasticsearch-reference/index.md children: - - file: elasticsearch/elasticsearch-reference/configuring-stack-security.md - file: elasticsearch/elasticsearch-reference/data-management.md - file: elasticsearch/elasticsearch-reference/documents-indices.md - file: elasticsearch/elasticsearch-reference/es-security-principles.md diff --git a/redirects.yml b/redirects.yml index 837c181fd5..8b1f999431 100644 --- a/redirects.yml +++ b/redirects.yml @@ -20,6 +20,7 @@ redirects: 'deploy-manage/deploy/cloud-enterprise/deploy-large-installation-cloud.md': '!deploy-manage/deploy/cloud-enterprise/deploy-large-installation.md' 'deploy-manage/deploy/self-managed/install-with-docker.md': '!deploy-manage/deploy/self-managed/install-kibana-with-docker.md' 'deploy-manage/deploy/self-managed/deploy-cluster.md': '!deploy-manage/deploy/self-managed/installing-elasticsearch.md' + 
'deploy-manage/deploy/self-managed/configure.md': '!deploy-manage/deploy/self-managed/configure-kibana.md' ## explore-analyze 'explore-analyze/machine-learning/nlp/ml-nlp-auto-scale.md': '!deploy-manage/autoscaling/trained-model-autoscaling.md' diff --git a/solutions/observability/apps/fleet-managed-apm-server.md b/solutions/observability/apps/fleet-managed-apm-server.md index 116f54cfa5..e2f880bbdb 100644 --- a/solutions/observability/apps/fleet-managed-apm-server.md +++ b/solutions/observability/apps/fleet-managed-apm-server.md @@ -159,7 +159,7 @@ xpack.fleet.packages: version: latest ``` -See [Configure Kibana](../../../deploy-manage/deploy/self-managed/configure.md) to learn more about how to edit the Kibana configuration file. +See [Configure Kibana](kibana://reference/configuration-reference/general-settings.md) to learn more about how to edit the Kibana configuration file. Option 2: Use the {{fleet}} API diff --git a/solutions/observability/incident-management/configure-access-to-cases.md b/solutions/observability/incident-management/configure-access-to-cases.md index 475b5f0140..2b4e1dddac 100644 --- a/solutions/observability/incident-management/configure-access-to-cases.md +++ b/solutions/observability/incident-management/configure-access-to-cases.md @@ -8,7 +8,7 @@ mapped_pages: To access and send cases to external systems, you need the [appropriate license](https://www.elastic.co/subscriptions), and your role must have the **Cases** {{kib}} privilege as a user for the **{{observability}}** feature. ::::{note} -If you are using an on-premises {{kib}} deployment and want your email notifications and external incident management systems to contain links back to {{kib}}, configure the [server.publicBaseUrl](../../../deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) setting. 
+If you are using an on-premises {{kib}} deployment and want your email notifications and external incident management systems to contain links back to {{kib}}, configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. :::: diff --git a/solutions/observability/logs/configure-data-sources.md b/solutions/observability/logs/configure-data-sources.md index 38d94dc3af..048b3d6431 100644 --- a/solutions/observability/logs/configure-data-sources.md +++ b/solutions/observability/logs/configure-data-sources.md @@ -18,7 +18,7 @@ To activate the Logs Stream app, refer to [Activate Logs Stream](logs-stream.md# :::: -Specify the source configuration for logs in the [Logs settings](kibana://reference/configuration-reference/logs-settings.md) in the [{{kib}} configuration file](../../../deploy-manage/deploy/self-managed/configure.md). By default, the configuration uses the index patterns stored in the {{kib}} log sources advanced setting to query the data. The configuration also defines the default columns displayed in the logs stream. +Specify the source configuration for logs in the [Logs settings](kibana://reference/configuration-reference/logs-settings.md) in the [{{kib}} configuration file](kibana://reference/configuration-reference/general-settings.md). By default, the configuration uses the index patterns stored in the {{kib}} log sources advanced setting to query the data. The configuration also defines the default columns displayed in the logs stream. If your logs have custom index patterns, use non-default field settings, or contain parsed fields that you want to expose as individual columns, you can override the default configuration settings. 
diff --git a/solutions/security/detect-and-alert/detections-requirements.md b/solutions/security/detect-and-alert/detections-requirements.md index 2d0760453d..d257f637d8 100644 --- a/solutions/security/detect-and-alert/detections-requirements.md +++ b/solutions/security/detect-and-alert/detections-requirements.md @@ -22,7 +22,7 @@ These steps are only required for **self-managed** deployments: * HTTPS must be configured for communication between [{{es}} and {{kib}}](/deploy-manage/security/set-up-basic-security-plus-https.md#encrypt-kibana-http). * In the `elasticsearch.yml` configuration file, set the `xpack.security.enabled` setting to `true`. For more information, refer to [Configuring {{es}}](/deploy-manage/deploy/self-managed/configure-elasticsearch.md) and [Security settings in {{es}}](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md). -* In the `kibana.yml` [configuration file](/deploy-manage/deploy/self-managed/configure.md), add the `xpack.encryptedSavedObjects.encryptionKey` setting with any alphanumeric value of at least 32 characters. For example: +* In the `kibana.yml` [configuration file](kibana://reference/configuration-reference/general-settings.md), add the `xpack.encryptedSavedObjects.encryptionKey` setting with any alphanumeric value of at least 32 characters. For example: `xpack.encryptedSavedObjects.encryptionKey: 'fhjskloppd678ehkdfdlliverpoolfcr'` @@ -80,7 +80,7 @@ You can set limits to the number of bytes and the buffer size used to upload [va To set the value: -1. Open `kibana.yml` [configuration file](/deploy-manage/deploy/self-managed/configure.md) or edit your {{kib}} cloud instance. +1. Open `kibana.yml` [configuration file](kibana://reference/configuration-reference/general-settings.md) or edit your {{kib}} cloud instance. 2. 
Add any of these settings and their required values: * `xpack.lists.maxImportPayloadBytes`: Sets the number of bytes allowed for uploading {{elastic-sec}} value lists (default `9000000`, maximum `100000000`). For every 10 megabytes, it is recommended to have an additional 1 gigabyte of RAM reserved for Kibana. diff --git a/solutions/security/detect-and-alert/view-detection-alert-details.md b/solutions/security/detect-and-alert/view-detection-alert-details.md index 365c8c5e3b..d180a9fb45 100644 --- a/solutions/security/detect-and-alert/view-detection-alert-details.md +++ b/solutions/security/detect-and-alert/view-detection-alert-details.md @@ -38,7 +38,7 @@ From the right panel, you can also: ::::{note} For {{stack}} users only: - If you’ve configured the [`server.publicBaseUrl`](/deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) setting in the `kibana.yml` file, the shareable URL is also in the `kibana.alert.url` field. You can find the field by searching for `kibana.alert.url` on the **Table** tab. + If you’ve configured the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting in the `kibana.yml` file, the shareable URL is also in the `kibana.alert.url` field. You can find the field by searching for `kibana.alert.url` on the **Table** tab. :::: diff --git a/solutions/security/investigate/cases-requirements.md b/solutions/security/investigate/cases-requirements.md index d0c9fab3d9..26f0d928d7 100644 --- a/solutions/security/investigate/cases-requirements.md +++ b/solutions/security/investigate/cases-requirements.md @@ -11,7 +11,7 @@ mapped_urls: - You need particular subscriptions and privileges to manage case attachments. For example in {{stack}}, to add alerts to cases, you must have privileges for [managing alerts](/solutions/security/detect-and-alert/detections-requirements.md#enable-detections-ui). 
In {{serverless-short}}, you need the Security Analytics Complete [project feature](../../../deploy-manage/deploy/elastic-cloud/project-settings.md). -- If you have an on-premises deployment and want email notifications and external incident management systems to contain links back to {{kib}}, you must configure the [server.publicBaseUrl](/deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) setting. +- If you have an on-premises deployment and want email notifications and external incident management systems to contain links back to {{kib}}, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. :::: diff --git a/solutions/security/investigate/open-manage-cases.md b/solutions/security/investigate/open-manage-cases.md index 52504ef725..cb9ba4855d 100644 --- a/solutions/security/investigate/open-manage-cases.md +++ b/solutions/security/investigate/open-manage-cases.md @@ -64,7 +64,7 @@ For self-managed {{kib}}: :::: 2. Set the `notifications.connectors.default.email` {{kib}} setting to the name of your email connector. -3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](/deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) setting. +3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. When you subsequently add assignees to cases, they receive an email. 
diff --git a/troubleshoot/elasticsearch/mapping-explosion.md b/troubleshoot/elasticsearch/mapping-explosion.md index 2cece3c24f..e804f8b2f6 100644 --- a/troubleshoot/elasticsearch/mapping-explosion.md +++ b/troubleshoot/elasticsearch/mapping-explosion.md @@ -14,7 +14,7 @@ Mapping explosion may surface as the following performance symptoms: * [CAT tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks) reporting long index durations only related to this index or indices. This usually relates to [pending tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks) reporting that the coordinating node is waiting for all other nodes to confirm they are on mapping update request. * Discover’s **Fields for wildcard** page-loading API command or [Dev Tools](../../explore-analyze/query-filter/tools/console.md) page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or timing out in the browser’s Developer Tools Network tab. For more information, refer to our [walkthrough on troubleshooting Discover](https://www.elastic.co/blog/troubleshooting-guide-common-issues-kibana-discover-load). * Discover’s **Available fields** taking a long time to compile Javascript in the browser’s Developer Tools Performance tab. This may potentially escalate to temporary browser page unresponsiveness. -* Kibana’s [alerting](../../explore-analyze/alerts-cases/alerts.md) or [security rules](../../solutions/security/detect-and-alert.md) may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {{kib}}'s [`server-maxPayload`](../../deploy-manage/deploy/self-managed/configure.md#server-maxPayload). 
+* Kibana’s [alerting](../../explore-analyze/alerts-cases/alerts.md) or [security rules](../../solutions/security/detect-and-alert.md) may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {{kib}}'s [`server-maxPayload`](kibana://reference/configuration-reference/general-settings.md#server-maxPayload). * Long {{es}} start-up durations. diff --git a/troubleshoot/kibana/access.md b/troubleshoot/kibana/access.md index 6210addb8a..f9c70faace 100644 --- a/troubleshoot/kibana/access.md +++ b/troubleshoot/kibana/access.md @@ -27,7 +27,7 @@ mapped_pages: % % 1. Point your web browser to the machine where you are running {{kib}} and specify the port number. For example, `localhost:5601` or `http://YOURDOMAIN.com:5601`. % -% To remotely connect to {{kib}}, set [server.host](../../deploy-manage/deploy/self-managed/configure.md#server-host) to a non-loopback address. +% To remotely connect to {{kib}}, set [server.host](kibana://reference/configuration-reference/general-settings.md#server-host) to a non-loopback address. % % 2. Log on to your account. % 3. Go to the home page, then click **{{kib}}**. diff --git a/troubleshoot/kibana/maps.md b/troubleshoot/kibana/maps.md index 61ff3f66e4..75eda3184e 100644 --- a/troubleshoot/kibana/maps.md +++ b/troubleshoot/kibana/maps.md @@ -44,7 +44,7 @@ Maps uses the [{{es}} vector tile search API](https://www.elastic.co/docs/api/do * Ensure your geospatial field is searchable and aggregatable. * If your geospatial field type does not match your Elasticsearch mapping, click the **Refresh** button to refresh the field list from Elasticsearch. -* Data views with thousands of fields can exceed the default maximum payload size. Increase [`server.maxPayload`](../../deploy-manage/deploy/self-managed/configure.md) for large data views. +* Data views with thousands of fields can exceed the default maximum payload size. 
Increase [`server.maxPayload`](kibana://reference/configuration-reference/general-settings.md) for large data views. ### Features are not displayed [_features_are_not_displayed] diff --git a/troubleshoot/kibana/reporting.md b/troubleshoot/kibana/reporting.md index 4b21497555..65d4299d62 100644 --- a/troubleshoot/kibana/reporting.md +++ b/troubleshoot/kibana/reporting.md @@ -59,7 +59,7 @@ Create a Markdown visualization and then create a PDF report. If this succeeds, logging.root.level: all ``` -For more information about logging, check out [Kibana configuration settings](../../deploy-manage/deploy/self-managed/configure.md#logging-root-level). +For more information about logging, check out [Kibana configuration settings](kibana://reference/configuration-reference/general-settings.md#logging-root-level). From b24587353bfef508cd945a87d8cc0e8c6747db4b Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 16:32:44 -0400 Subject: [PATCH 29/43] more fixes --- .../elastic-cloud/restrictions-known-problems.md | 2 +- .../deploy/self-managed/_snippets/enroll-nodes.md | 2 +- .../_snippets/install-kib-next-steps.md | 2 +- .../self-managed/_snippets/install-next-steps.md | 2 +- .../deploy/self-managed/_snippets/targz-daemon.md | 2 +- .../install-kibana-from-archive-on-linux-macos.md | 2 +- .../self-managed/install-kibana-on-windows.md | 2 +- .../install-kibana-with-debian-package.md | 2 +- .../self-managed/install-kibana-with-rpm.md | 2 +- .../deploy/self-managed/install-kibana.md | 4 ++-- .../self-managed/installing-elasticsearch.md | 3 +++ .../visualizing-monitoring-data.md | 2 +- .../kibana-in-production-environments.md | 4 ++-- .../find-and-organize/saved-objects.md | 4 ++-- .../cloud-heroku/ech-manage-kibana-settings.md | 2 +- .../cloud/cloud/ec-manage-kibana-settings.md | 2 +- .../kibana/Security-production-considerations.md | 2 +- .../elastic-stack/installing-stack-demo-self.md | 15 +-------------- troubleshoot/elasticsearch/mapping-explosion.md | 2 +- 19 
files changed, 24 insertions(+), 34 deletions(-) diff --git a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md index 305abb8d14..19f484aea1 100644 --- a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md +++ b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md @@ -55,7 +55,7 @@ Elasticsearch APIs $$$ec-restrictions-apis-kibana$$$ Kibana APIs -: There are no rate limits restricting your use of the Kibana APIs. However, Kibana features are affected by the [Kibana configuration settings](../self-managed/configure.md), not all of which are supported in {{ecloud}}. For a list of what settings are currently supported, check [Add Kibana user settings](edit-stack-settings.md). For all details about using the Kibana APIs, check the [Kibana API reference documentation](https://www.elastic.co/guide/en/kibana/current/api.html). +: There are no rate limits restricting your use of the Kibana APIs. However, Kibana features are affected by the [Kibana configuration settings](/deploy-manage/deploy/self-managed/configure-kibana.md), not all of which are supported in {{ecloud}}. For a list of what settings are currently supported, check [Add Kibana user settings](edit-stack-settings.md). For all details about using the Kibana APIs, check the [Kibana API reference documentation](https://www.elastic.co/guide/en/kibana/current/api.html). ## Transport client [ec-restrictions-transport-client] diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md index 20f9d9f225..00abb09d51 100644 --- a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md @@ -28,4 +28,4 @@ To enroll new nodes in your cluster, create an enrollment token with the `elasti 3. Repeat the previous step for any new nodes that you want to enroll. 
-For more information about discovery and shard allocation, refer to [Discovery and cluster formation](../distributed-architecture/discovery-cluster-formation.md) and [Cluster-level shard allocation and routing settings](elasticsearch://reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md). \ No newline at end of file +For more information about discovery and shard allocation, refer to [Discovery and cluster formation](/deploy-manage/distributed-architecture/discovery-cluster-formation.md) and [Cluster-level shard allocation and routing settings](elasticsearch://reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md). \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md index 8b1e806790..ba9857b0c3 100644 --- a/deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md +++ b/deploy-manage/deploy/self-managed/_snippets/install-kib-next-steps.md @@ -2,4 +2,4 @@ You now have a basic {{kib}} instance set up. Consider the following next steps: * Learn how to [configure {{kib}}](/deploy-manage/deploy/self-managed/configure-kibana.md). * Learn how to [access {{kib}}](/deploy-manage/deploy/self-managed/access-kibana.md). -* Explore [key configuration topics](/deploy-manage/deploy/self-managed/configure-kibana.md#additional-guides) to learn how to secure and manage {{kib}}. \ No newline at end of file +* Explore [key configuration topics](/deploy-manage/deploy/self-managed/configure-kibana.md#additional-topics) to learn how to secure and manage {{kib}}. 
\ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md index 38ac471011..49a8735e4e 100644 --- a/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md +++ b/deploy-manage/deploy/self-managed/_snippets/install-next-steps.md @@ -9,4 +9,4 @@ You can also do the following: * Consider installing [additional {{stack}} components](/get-started/the-stack.md). * Learn how to [ingest data into {{es}}](/manage-data/index.md). * Learn about [production considerations](/deploy-manage/production-guidance.md). -* Explore [other topics](/deploy-manage/deploy/self-managed#other-important-sections.md) to learn how to secure and manage your cluster. \ No newline at end of file +* Explore [other topics](/deploy-manage/deploy/self-managed.md#other-important-sections) to learn how to secure and manage your cluster. \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md b/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md index 83e68d4144..e337a09748 100644 --- a/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md +++ b/deploy-manage/deploy/self-managed/_snippets/targz-daemon.md @@ -15,5 +15,5 @@ pkill -F pid ``` ::::{note} -The {{es}} `.tar.gz` package does not include the `systemd` module. To manage {{es}} as a service, use the [Debian](/deploy-manage/deploy/self-managed/install-with-debian-package.md) or [RPM](/deploy-manage/deploy/self-managed/install-with-rpm.md) package instead. +The {{es}} `.tar.gz` package does not include the `systemd` module. To manage {{es}} as a service, use the [Debian](/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md) or [RPM](/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md) package instead. 
:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md index 130c505c81..90e72d800d 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md @@ -95,7 +95,7 @@ By default, {{kib}} runs in the foreground, prints its logs to the standard outp ## Step 4: Configure {{kib}} using the config file [targz-configuring] -{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [](configure.md). +{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [](configure-kibana.md). ## Directory layout of `.tar.gz` archives [targz-layout] diff --git a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md index 8f7db869bc..471835c2e5 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md @@ -55,7 +55,7 @@ By default, {{kib}} runs in the foreground, prints its logs to `STDOUT`, and can ## Step 4: Configure {{kib}} using the config file [windows-configuring] -{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [](configure.md). +{{kib}} loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in [](configure-kibana.md). 
## Directory layout of `.zip` archive [windows-layout] diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md index 66b6efd4de..ca51d5fa18 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md @@ -129,7 +129,7 @@ These commands provide no feedback as to whether {{kib}} was started successfull ## Step 6: Configure {{kib}} using the config file [deb-configuring] -{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [](configure.md). +{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [](configure-kibana.md). ## Directory layout of Debian package [deb-layout] diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md index 0cdd131bac..84115f798e 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md @@ -119,7 +119,7 @@ These commands provide no feedback as to whether {{kib}} was started successfull ## Step 6: Configure {{kib}} using the config file [rpm-configuring] -{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [](configure.md). +{{kib}} loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in [](configure-kibana.md). 
## Directory layout of RPM [rpm-layout] diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index 3cc46614a8..680d00ca45 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -35,8 +35,8 @@ To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} f | --- | --- | --- | | `tar.gz` | The `tar.gz` packages are provided for installation on Linux and Darwin and are the easiest choice for getting started with {{kib}}. | [Install from archive on Linux or macOS](/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md)| | `zip` | The `zip` package is the only supported package for Windows.| [Install on Windows](/deploy-manage/deploy/self-managed/install-on-windows.md)| -| `deb` | The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elastic website or from our Debian repository. | [Install with Debian package](/deploy-manage/deploy/self-managed/install-with-debian-package.md) | -| `rpm` | The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elastic website or from our RPM repository. | [Install with RPM](/deploy-manage/deploy/self-managed/install-with-rpm.md) | +| `deb` | The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elastic website or from our Debian repository. | [Install with Debian package](/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md) | +| `rpm` | The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elastic website or from our RPM repository. 
| [Install with RPM](/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md) | | `docker` | Images are available for running {{kib}} as a Docker container. They may be downloaded from the Elastic Docker Registry. | [Running {{kib}} on Docker](/deploy-manage/deploy/self-managed/install-kibana-with-docker.md) | ## {{es}} version [elasticsearch-version] diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index df1ca6f20a..d80508a607 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -13,6 +13,8 @@ applies_to: # Deploy an {{es}} cluster +% will be fixed + $$$stack-security-certificates$$$ This section includes information on how to set up {{es}} and get it running, including: @@ -34,6 +36,7 @@ To try out {{stack}} on your own machine, we recommend using Docker and running :::: ::::{admonition} Use dedicated hosts +$$$dedicated-host$$$ :::{include} _snippets/dedicated-hosts.md ::: :::: diff --git a/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md b/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md index e9393c805c..d9f3489019 100644 --- a/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md +++ b/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md @@ -16,7 +16,7 @@ The {{kib}} {{monitor-features}} serve two separate purposes: 1. To visualize monitoring data from across the {{stack}}. You can view health and performance data for {{es}}, {{ls}}, APM, and Beats in real time, as well as analyze past performance. 2. To monitor {{kib}} itself and route that data to the monitoring cluster. 
-If you enable monitoring across the {{stack}}, each monitored component is considered unique based on its persistent UUID, which is written to the [`path.data`](../../deploy/self-managed/configure.md) directory when the node or instance starts. +If you enable monitoring across the {{stack}}, each monitored component is considered unique based on its persistent UUID, which is written to the [`path.data`](kibana://reference/configuration-reference/general-settings.md#path-data) directory when the node or instance starts. For more information, see [Configure monitoring](../stack-monitoring/kibana-monitoring-self-managed.md) and [Monitor a cluster](../../monitor.md). diff --git a/deploy-manage/production-guidance/kibana-in-production-environments.md b/deploy-manage/production-guidance/kibana-in-production-environments.md index db354a5a8e..718211b482 100644 --- a/deploy-manage/production-guidance/kibana-in-production-environments.md +++ b/deploy-manage/production-guidance/kibana-in-production-environments.md @@ -22,7 +22,7 @@ While {{kib}} isn’t terribly resource intensive, we still recommend running {{ ## Load balancing across multiple {{kib}} instances [load-balancing-kibana] -To serve multiple {{kib}} installations behind a load balancer, you must change the configuration. See [Configuring {{kib}}](../deploy/self-managed/configure.md) for details on each setting. +To serve multiple {{kib}} installations behind a load balancer, you must change the configuration. See [Configuring {{kib}}](../deploy/self-managed/configure-kibana.md) for details on each setting. These settings must be unique across each {{kib}} instance: @@ -92,7 +92,7 @@ elasticsearch.hosts: - http://elasticsearch2:9200 ``` -Related configurations include `elasticsearch.sniffInterval`, `elasticsearch.sniffOnStart`, and `elasticsearch.sniffOnConnectionFault`. These can be used to automatically update the list of hosts as a cluster is resized. 
Parameters can be found on the [settings page](../deploy/self-managed/configure.md). +Related configurations include `elasticsearch.sniffInterval`, `elasticsearch.sniffOnStart`, and `elasticsearch.sniffOnConnectionFault`. These can be used to automatically update the list of hosts as a cluster is resized. Parameters can be found on the [settings page](kibana://reference/configuration-reference/general-settings.md). ## Memory [memory] diff --git a/explore-analyze/find-and-organize/saved-objects.md b/explore-analyze/find-and-organize/saved-objects.md index 1251477516..e67e06589b 100644 --- a/explore-analyze/find-and-organize/saved-objects.md +++ b/explore-analyze/find-and-organize/saved-objects.md @@ -80,7 +80,7 @@ Import multiple objects in a single operation. 4. Click **Import**. ::::{note} -The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportPayloadBytes) setting limits the overall size of the file that you can import. +The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maximportpayloadbytes) setting limits the overall size of the file that you can import. :::: @@ -94,7 +94,7 @@ Export objects by selection or type. {{kib}} creates an NDJSON with all your saved objects. By default, the NDJSON includes child objects related to the saved objects. Exported dashboards include their associated {{data-sources}}. 
::::{note} -The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects that you can export. +The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedobjects-maximportexportsize) configuration setting limits the number of saved objects that you can export. :::: diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md b/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md index 059c643555..7000c39a7b 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md @@ -112,7 +112,7 @@ If a setting is not supported by Elasticsearch Add-On for Heroku, you will get a : The maximum payload size in bytes for incoming server requests. Default: 1048576. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-maxpayload). `server.securityResponseHeaders.strictTransportSecurity` -: Controls whether the [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-stricttransportsecurity). +: Controls whether the [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-stricttransportsecurity).
`server.securityResponseHeaders.xContentTypeOptions` : Controls whether the [`X-Content-Type-Options`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-xcontenttypeoptions). diff --git a/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md b/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md index d20d62cb50..88ff28115b 100644 --- a/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md +++ b/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md @@ -112,7 +112,7 @@ If a setting is not supported by {{ech}}, you will get an error message when you : The maximum payload size in bytes for incoming server requests. Default: 1048576. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-maxpayload). `server.securityResponseHeaders.strictTransportSecurity` -: Controls whether the [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-stricttransportsecurity). +: Controls whether the [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-stricttransportsecurity).
`server.securityResponseHeaders.xContentTypeOptions` : Controls whether the [`X-Content-Type-Options`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-xcontenttypeoptions). diff --git a/raw-migrated-files/kibana/kibana/Security-production-considerations.md b/raw-migrated-files/kibana/kibana/Security-production-considerations.md index 1ec8ed02e6..7af43c0f56 100644 --- a/raw-migrated-files/kibana/kibana/Security-production-considerations.md +++ b/raw-migrated-files/kibana/kibana/Security-production-considerations.md @@ -32,7 +32,7 @@ The {{kib}} server can instruct browsers to enable additional security controls 1. Enable HTTP Strict-Transport-Security. - Use [`strictTransportSecurity`](kibana://reference/configuration-reference/general-settings.md#server-securityResponseHeaders-strictTransportSecurity) to ensure that browsers will only attempt to access {{kib}} with SSL/TLS encryption. This is designed to prevent manipulator-in-the-middle attacks. To configure this with a lifetime of one year in your `kibana.yml`: + Use [`strictTransportSecurity`](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-stricttransportsecurity) to ensure that browsers will only attempt to access {{kib}} with SSL/TLS encryption. This is designed to prevent manipulator-in-the-middle attacks.
To configure this with a lifetime of one year in your `kibana.yml`: ```js server.securityResponseHeaders.strictTransportSecurity: "max-age=31536000" diff --git a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md index 6b92dfd787..3a9ecd5563 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md +++ b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md @@ -4,19 +4,6 @@ This tutorial demonstrates how to install and configure the {{stack}} in a self- It should take between one and two hours to complete these steps. -* [Prerequisites and assumptions](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-prereqs) -* [{{stack}} overview](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-overview) -* [Step 1: Set up the first {{es}} node](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elasticsearch-first) -* [Step 2: Configure the first {{es}} node for connectivity](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elasticsearch-config) -* [Step 3: Start {{es}}](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elasticsearch-start) -* [Step 4: Set up a second {{es}} node](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elasticsearch-second) -* [Step 5: Set up additional {{es}} nodes](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elasticsearch-third) -* [Step 6: Install {{kib}}](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-kibana) -* [Step 7: Install {{fleet-server}}](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-fleet-server) -* [Step 8: Install 
{{agent}}](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elastic-agent) -* [Step 9: View your system data](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-view-data) -* [Next steps](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-next-steps) - ::::{important} If you’re using these steps to configure a production cluster that uses trusted CA-signed certificates for secure communications, after completing Step 6 to install {{kib}} we recommend jumping directly to [Tutorial 2: Securing a self-managed {{stack}}](../../../deploy-manage/security/secure-your-cluster-deployment.md). @@ -379,7 +366,7 @@ To set up your next {{es}} node, follow exactly the same steps as you did previo ## Step 6: Install {{kib}} [install-stack-self-kibana] -As with {{es}}, you can use RPM to install {{kib}} on another host. You can find details about all of the following steps in the section [Install {{kib}} with RPM](../../../deploy-manage/deploy/self-managed/install-with-rpm.md#install-rpm). +As with {{es}}, you can use RPM to install {{kib}} on another host. You can find details about all of the following steps in the section [Install {{kib}} with RPM](../../../deploy-manage/deploy/self-managed/install-kibana-with-rpm.md#install-rpm). 1. Log in to the host where you’d like to install {{kib}} and create a working directory for the installation package: diff --git a/troubleshoot/elasticsearch/mapping-explosion.md b/troubleshoot/elasticsearch/mapping-explosion.md index e804f8b2f6..27c1c8a06c 100644 --- a/troubleshoot/elasticsearch/mapping-explosion.md +++ b/troubleshoot/elasticsearch/mapping-explosion.md @@ -14,7 +14,7 @@ Mapping explosion may surface as the following performance symptoms: * [CAT tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks) reporting long index durations only related to this index or indices. 
This usually relates to [pending tasks](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks) reporting that the coordinating node is waiting for all other nodes to confirm they are on mapping update request. * Discover’s **Fields for wildcard** page-loading API command or [Dev Tools](../../explore-analyze/query-filter/tools/console.md) page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or timing out in the browser’s Developer Tools Network tab. For more information, refer to our [walkthrough on troubleshooting Discover](https://www.elastic.co/blog/troubleshooting-guide-common-issues-kibana-discover-load). * Discover’s **Available fields** taking a long time to compile Javascript in the browser’s Developer Tools Performance tab. This may potentially escalate to temporary browser page unresponsiveness. -* Kibana’s [alerting](../../explore-analyze/alerts-cases/alerts.md) or [security rules](../../solutions/security/detect-and-alert.md) may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {{kib}}'s [`server-maxPayload`](kibana://reference/configuration-reference/general-settings.md#server-maxPayload). +* Kibana’s [alerting](../../explore-analyze/alerts-cases/alerts.md) or [security rules](../../solutions/security/detect-and-alert.md) may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {{kib}}'s [`server-maxPayload`](kibana://reference/configuration-reference/general-settings.md#server-maxpayload). * Long {{es}} start-up durations. 
From 9d18e8745a0e8cca5807411f9bba69c84b7c37d1 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 17:10:34 -0400 Subject: [PATCH 30/43] more fix --- .../deploy/self-managed/air-gapped-install.md | 2 -- .../tools/snapshot-and-restore/create-snapshots.md | 2 +- .../alerts-cases/alerts/alerting-setup.md | 2 +- .../alerts-cases/alerts/rule-action-variables.md | 2 +- explore-analyze/alerts-cases/cases/manage-cases.md | 2 +- explore-analyze/alerts-cases/cases/setup-cases.md | 2 +- explore-analyze/find-and-organize/saved-objects.md | 2 +- .../visualize/maps/maps-connect-to-ems.md | 12 ++++++------ .../incident-management/configure-access-to-cases.md | 2 +- .../detect-and-alert/view-detection-alert-details.md | 2 +- solutions/security/investigate/cases-requirements.md | 2 +- solutions/security/investigate/open-manage-cases.md | 2 +- 12 files changed, 16 insertions(+), 18 deletions(-) diff --git a/deploy-manage/deploy/self-managed/air-gapped-install.md b/deploy-manage/deploy/self-managed/air-gapped-install.md index 265e9cef66..8465c8c778 100644 --- a/deploy-manage/deploy/self-managed/air-gapped-install.md +++ b/deploy-manage/deploy/self-managed/air-gapped-install.md @@ -56,8 +56,6 @@ Additionally, if the {{agent}} {{elastic-defend}} integration is used, then acce To learn more about install and configuration, refer to the [{{agent}} install documentation](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md). Make sure to check the requirements specific to running {{agents}} in an [air-gapped environment](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/air-gapped.md). -To get a better understanding of how to work with {{agent}} configuration settings and policies, refer to [Appendix D - Agent Integration Guide](/deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-guide). 
- ## {{fleet-server}} [air-gapped-fleet] diff --git a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md index 0a1de5d642..ff655739db 100644 --- a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md +++ b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md @@ -254,7 +254,7 @@ If you delete a snapshot that’s in progress, {{es}} cancels it. The snapshot p If you run {{es}} on your own hardware, we recommend that, in addition to backups, you take regular backups of the files in each node’s [`$ES_PATH_CONF` directory](../../deploy/self-managed/configure-elasticsearch.md#config-files-location) using the file backup software of your choice. Snapshots don’t back up these files. Also note that these files will differ on each node, so each node’s files should be backed up individually. ::::{important} -The `elasticsearch.keystore`, TLS keys, and [SAML](../../deploy/self-managed/configure-elasticsearch.md#ref-saml-settings), [OIDC](../../deploy/self-managed/configure-elasticsearch.md#ref-oidc-settings), and [Kerberos](../../deploy/self-managed/configure-elasticsearch.md#ref-kerberos-settings) realms private key files contain sensitive information. Consider encrypting your backups of these files. +The `elasticsearch.keystore`, TLS keys, and [SAML](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md#ref-saml-settings), [OIDC](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md#ref-oidc-settings), and [Kerberos](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md#ref-kerberos-settings) realms private key files contain sensitive information. Consider encrypting your backups of these files. 
:::: diff --git a/explore-analyze/alerts-cases/alerts/alerting-setup.md b/explore-analyze/alerts-cases/alerts/alerting-setup.md index a58f326a89..e5786f81bd 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-setup.md +++ b/explore-analyze/alerts-cases/alerts/alerting-setup.md @@ -16,7 +16,7 @@ mapped_pages: If you are using an **on-premises** {{stack}} deployment: * In the `kibana.yml` configuration file, add the [`xpack.encryptedSavedObjects.encryptionKey`](kibana://reference/configuration-reference/alerting-settings.md#general-alert-action-settings) setting. -* For emails to have a footer with a link back to {{kib}}, set the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) configuration setting. +* For emails to have a footer with a link back to {{kib}}, set the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl) configuration setting. If you are using an **on-premises** {{stack}} deployment with [**security**](../../../deploy-manage/security.md): diff --git a/explore-analyze/alerts-cases/alerts/rule-action-variables.md b/explore-analyze/alerts-cases/alerts/rule-action-variables.md index 88471266cf..187eac1d13 100644 --- a/explore-analyze/alerts-cases/alerts/rule-action-variables.md +++ b/explore-analyze/alerts-cases/alerts/rule-action-variables.md @@ -34,7 +34,7 @@ All rule types pass the following variables: : The date the rule scheduled the action, in ISO format. `kibanaBaseUrl` -: The configured [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl). If not configured, this will be empty. +: The configured [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl). If not configured, this will be empty. `rule.id` : The rule identifier. 
diff --git a/explore-analyze/alerts-cases/cases/manage-cases.md b/explore-analyze/alerts-cases/cases/manage-cases.md index b6b8eca3fd..b055ea1d87 100644 --- a/explore-analyze/alerts-cases/cases/manage-cases.md +++ b/explore-analyze/alerts-cases/cases/manage-cases.md @@ -68,7 +68,7 @@ For self-managed {{kib}}: hasAuth: true/false ``` -3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. +3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl) setting. When you subsequently add assignees to cases, they receive an email. diff --git a/explore-analyze/alerts-cases/cases/setup-cases.md b/explore-analyze/alerts-cases/cases/setup-cases.md index 7fff510b73..3fb11da9cd 100644 --- a/explore-analyze/alerts-cases/cases/setup-cases.md +++ b/explore-analyze/alerts-cases/cases/setup-cases.md @@ -58,5 +58,5 @@ You can customize sub-feature privileges for deleting cases and comments, editin For more details, refer to [{{kib}} privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kibana-privileges.md). ::::{note} -If you are using an on-premises {{kib}} deployment and you want the email notifications and the external incident management systems to contain links back to {{kib}}, you must configure the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. +If you are using an on-premises {{kib}} deployment and you want the email notifications and the external incident management systems to contain links back to {{kib}}, you must configure the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl) setting. 
:::: diff --git a/explore-analyze/find-and-organize/saved-objects.md b/explore-analyze/find-and-organize/saved-objects.md index e67e06589b..9793603557 100644 --- a/explore-analyze/find-and-organize/saved-objects.md +++ b/explore-analyze/find-and-organize/saved-objects.md @@ -80,7 +80,7 @@ Import multiple objects in a single operation. 4. Click **Import**. ::::{note} -The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maxImportExportSize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maximportpayloadbytes) setting limits the overall size of the file that you can import. +The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedobjects-maximportexportsize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](kibana://reference/configuration-reference/general-settings.md#savedobjects-maximportpayloadbytes) setting limits the overall size of the file that you can import. :::: diff --git a/explore-analyze/visualize/maps/maps-connect-to-ems.md b/explore-analyze/visualize/maps/maps-connect-to-ems.md index b17f225b03..47a53e4761 100644 --- a/explore-analyze/visualize/maps/maps-connect-to-ems.md +++ b/explore-analyze/visualize/maps/maps-connect-to-ems.md @@ -536,7 +536,7 @@ If you cannot connect to Elastic Maps Service from the {{kib}} server or browser | --- | --- | | $$$ems-host$$$`host` | Specifies the host of the backend server. To allow remote users to connect, set the value to the IP address or DNS name of the {{hosted-ems}} container. **Default: *your-hostname***. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-host). | | `port` | Specifies the port used by the backend server.
Default: **`8080`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-port). | -| `basePath` | Specify a path at which to mount the server if you are running behind a proxy. This setting cannot end in a slash (`/`). [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-basePath). | +| `basePath` | Specify a path at which to mount the server if you are running behind a proxy. This setting cannot end in a slash (`/`). [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-basepath). | | `ui` | Controls the display of the status page and the layer preview. **Default: `true`** | | `logging.level` | Verbosity of {{hosted-ems}} logs. Valid values are `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `silent`. **Default: `info`** | | `path.planet` | Path of the basemaps database. **Default: `/usr/src/app/data/planet.mbtiles`** | @@ -547,19 +547,19 @@ If you cannot connect to Elastic Maps Service from the {{kib}} server or browser | --- | --- | | `elasticsearch.host` | URL of the {{es}} instance to use for license validation. | | `elasticsearch.username` and `elasticsearch.password` | Credentials of a user with at least the `monitor` role. | -| `elasticsearch.ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. This chain is used by {{hosted-ems}} to establish trust when connecting to your {{es}} cluster. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-certificateAuthorities). | +| `elasticsearch.ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. 
This chain is used by {{hosted-ems}} to establish trust when connecting to your {{es}} cluster. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-certificateauthorities). | | `elasticsearch.ssl.certificate` and `elasticsearch.ssl.key`, and `elasticsearch.ssl.keyPassphrase` | Optional settings that provide the paths to the PEM-format SSL certificate and key files and the key password. These files are used to verify the identity of {{hosted-ems}} to {{es}} and are required when `xpack.security.http.ssl.client_authentication` in {{es}} is set to `required`. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-cert-key). | -| `elasticsearch.ssl.verificationMode` | Controls the verification of the server certificate that {{hosted-ems}} receives when making an outbound SSL/TLS connection to {{es}}. Valid values are "`full`", "`certificate`", and "`none`". Using "`full`" performs hostname verification, using "`certificate`" skips hostname verification, and using "`none`" skips verification entirely. **Default: `full`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-verificationMode). | +| `elasticsearch.ssl.verificationMode` | Controls the verification of the server certificate that {{hosted-ems}} receives when making an outbound SSL/TLS connection to {{es}}. Valid values are "`full`", "`certificate`", and "`none`". Using "`full`" performs hostname verification, using "`certificate`" skips hostname verification, and using "`none`" skips verification entirely. **Default: `full`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#elasticsearch-ssl-verificationmode). | **Server security settings** | | | | --- | --- | | `ssl.enabled` | Enables SSL/TLS for inbound connections to {{hosted-ems}}. 
When set to `true`, a certificate and its corresponding private key must be provided. **Default: `false`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-enabled). | -| `ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. This chain is used by the {{hosted-ems}} to establish trust when receiving inbound SSL/TLS connections from end users. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-certificateAuthorities). | +| `ssl.certificateAuthorities` | Paths to one or more PEM-encoded X.509 certificate authority (CA) certificates that make up a trusted certificate chain for {{hosted-ems}}. This chain is used by the {{hosted-ems}} to establish trust when receiving inbound SSL/TLS connections from end users. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-certificateauthorities). | | `ssl.key`, `ssl.certificate`, and `ssl.keyPassphrase` | Location of yor SSL key and certificate files and the password that decrypts the private key that is specified via `ssl.key`. This password is optional, as the key may not be encrypted. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-cert-key). | -| `ssl.supportedProtocols` | An array of supported protocols with versions.Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2`. **Default: `TLSv1.1`, `TLSv1.2`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-supportedProtocols). 
| -| `ssl.cipherSuites` | Details on the format, and the valid options, are available via the[OpenSSL cipher list format documentation](https://www.openssl.org/docs/man1.1.1/man1/ciphers.html#CIPHER-LIST-FORMAT).**Default: `TLS_AES_256_GCM_SHA384 TLS_CHACHA20_POLY1305_SHA256 TLS_AES_128_GCM_SHA256 ECDHE-RSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-GCM-SHA384, DHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-SHA384, DHE-RSA-AES256-SHA384, ECDHE-RSA-AES256-SHA256, DHE-RSA-AES256-SHA256, HIGH,!aNULL, !eNULL, !EXPORT, !DES, !RC4, !MD5, !PSK, !SRP, !CAMELLIA`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-cipherSuites). | +| `ssl.supportedProtocols` | An array of supported protocols with versions. Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2`. **Default: `TLSv1.1`, `TLSv1.2`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-supportedprotocols). | +| `ssl.cipherSuites` | Details on the format, and the valid options, are available via the [OpenSSL cipher list format documentation](https://www.openssl.org/docs/man1.1.1/man1/ciphers.html#CIPHER-LIST-FORMAT). **Default: `TLS_AES_256_GCM_SHA384 TLS_CHACHA20_POLY1305_SHA256 TLS_AES_128_GCM_SHA256 ECDHE-RSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-GCM-SHA384, DHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-SHA384, DHE-RSA-AES256-SHA384, ECDHE-RSA-AES256-SHA256, DHE-RSA-AES256-SHA256, HIGH,!aNULL, !eNULL, !EXPORT, !DES, !RC4, !MD5, !PSK, !SRP, !CAMELLIA`**. [Equivalent {{kib}} setting](kibana://reference/configuration-reference/general-settings.md#server-ssl-ciphersuites).
| #### Bind-mounted configuration [elastic-maps-server-bind-mount-config] diff --git a/solutions/observability/incident-management/configure-access-to-cases.md b/solutions/observability/incident-management/configure-access-to-cases.md index 2b4e1dddac..dcfa328096 100644 --- a/solutions/observability/incident-management/configure-access-to-cases.md +++ b/solutions/observability/incident-management/configure-access-to-cases.md @@ -8,7 +8,7 @@ mapped_pages: To access and send cases to external systems, you need the [appropriate license](https://www.elastic.co/subscriptions), and your role must have the **Cases** {{kib}} privilege as a user for the **{{observability}}** feature. ::::{note} -If you are using an on-premises {{kib}} deployment and want your email notifications and external incident management systems to contain links back to {{kib}}, configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. +If you are using an on-premises {{kib}} deployment and want your email notifications and external incident management systems to contain links back to {{kib}}, configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl) setting. :::: diff --git a/solutions/security/detect-and-alert/view-detection-alert-details.md b/solutions/security/detect-and-alert/view-detection-alert-details.md index d180a9fb45..47a15cbc22 100644 --- a/solutions/security/detect-and-alert/view-detection-alert-details.md +++ b/solutions/security/detect-and-alert/view-detection-alert-details.md @@ -38,7 +38,7 @@ From the right panel, you can also: ::::{note} For {{stack}} users only: - If you’ve configured the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting in the `kibana.yml` file, the shareable URL is also in the `kibana.alert.url` field. 
You can find the field by searching for `kibana.alert.url` on the **Table** tab. + If you’ve configured the [`server.publicBaseUrl`](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl) setting in the `kibana.yml` file, the shareable URL is also in the `kibana.alert.url` field. You can find the field by searching for `kibana.alert.url` on the **Table** tab. :::: diff --git a/solutions/security/investigate/cases-requirements.md b/solutions/security/investigate/cases-requirements.md index 26f0d928d7..4a7edb048f 100644 --- a/solutions/security/investigate/cases-requirements.md +++ b/solutions/security/investigate/cases-requirements.md @@ -11,7 +11,7 @@ mapped_urls: - You need particular subscriptions and privileges to manage case attachments. For example in {{stack}}, to add alerts to cases, you must have privileges for [managing alerts](/solutions/security/detect-and-alert/detections-requirements.md#enable-detections-ui). In {{serverless-short}}, you need the Security Analytics Complete [project feature](../../../deploy-manage/deploy/elastic-cloud/project-settings.md). -- If you have an on-premises deployment and want email notifications and external incident management systems to contain links back to {{kib}}, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. +- If you have an on-premises deployment and want email notifications and external incident management systems to contain links back to {{kib}}, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl) setting. 
:::: diff --git a/solutions/security/investigate/open-manage-cases.md b/solutions/security/investigate/open-manage-cases.md index cb9ba4855d..6c95933802 100644 --- a/solutions/security/investigate/open-manage-cases.md +++ b/solutions/security/investigate/open-manage-cases.md @@ -64,7 +64,7 @@ For self-managed {{kib}}: :::: 2. Set the `notifications.connectors.default.email` {{kib}} setting to the name of your email connector. -3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicBaseUrl) setting. +3. If you want the email notifications to contain links back to the case, you must configure the [server.publicBaseUrl](kibana://reference/configuration-reference/general-settings.md#server-publicbaseurl) setting. When you subsequently add assignees to cases, they receive an email. From f8aadbef530153dd7f1b75bd72686b4b2a2c09e6 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 17:44:12 -0400 Subject: [PATCH 31/43] more --- deploy-manage/deploy/self-managed.md | 6 +++--- .../deploy/self-managed/configure-elasticsearch.md | 2 +- .../install-kibana-from-archive-on-linux-macos.md | 2 +- .../deploy/self-managed/install-kibana-on-windows.md | 2 +- .../self-managed/install-kibana-with-debian-package.md | 2 +- .../deploy/self-managed/install-kibana-with-docker.md | 2 +- .../deploy/self-managed/install-kibana-with-rpm.md | 4 ++-- deploy-manage/deploy/self-managed/install-kibana.md | 4 ++-- deploy-manage/deploy/self-managed/tools-apis.md | 6 +++--- .../manually-configure-security-in-self-managed-cluster.md | 2 +- deploy-manage/security/security-certificates-keys.md | 2 +- .../tools/snapshot-and-restore/create-snapshots.md | 2 +- .../tools/snapshot-and-restore/searchable-snapshots.md | 2 +- explore-analyze/find-and-organize/saved-objects.md | 2 +- .../stack-docs/elastic-stack/installing-stack-demo-self.md | 4 ++-- 15 files 
changed, 22 insertions(+), 22 deletions(-) diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index a500b9f46e..5544042054 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -25,7 +25,7 @@ Self-hosted options: * [{{eck}}](/deploy-manage/deploy/cloud-on-k8s.md) * [{{ece}}](/deploy-manage/deploy/cloud-enterprise.md) -For a comparison of these deployment options, refer to [Choosing your deployment type](/deploy-manage/deploy#choosing-your-deployment-type.md) and [](/deploy-manage/deploy/deployment-comparison.md). +For a comparison of these deployment options, refer to [Choosing your deployment type](/deploy-manage/deploy.md#choosing-your-deployment-type) and [](/deploy-manage/deploy/deployment-comparison.md). ::: ## Section overview @@ -42,14 +42,14 @@ Learn how to install and configure {{es}}. {{es}} is the distributed search and * [](/deploy-manage/deploy/self-managed/installing-elasticsearch.md) * [](/deploy-manage/deploy/self-managed/important-system-configuration.md): Prepare your environment for an {{es}} installation. - * [](/deploy-manage/deploy/self-managed/deploy-cluster#installation-methods.md): Install and run {{es}} using one of our install packages or container images. + * [](/deploy-manage/deploy/self-managed/deploy-cluster.md#installation-methods): Install and run {{es}} using one of our install packages or container images. * [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md): Quickly set up {{es}} and {{kib}} in Docker for local development or testing. * [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md): Learn how to make configuration changes to {{es}} * [](/deploy-manage/deploy/self-managed/important-settings-configuration.md): Learn about key settings required for production environments. * [](/deploy-manage/deploy/self-managed/plugins.md): Learn about how to extend {{es}} functionality with plugins. 
:::{note} - For a complete list of settings that you can apply to your {{es}} cluster, refer to the [Elasticsearch configuration reference](elasticsearch://reference/elasticsearch/configuration-reference.md). + For a complete list of settings that you can apply to your {{es}} cluster, refer to the [Elasticsearch configuration reference](elasticsearch://reference/elasticsearch/configuration-reference/index.md). ::: ### Deploying Kibana diff --git a/deploy-manage/deploy/self-managed/configure-elasticsearch.md b/deploy-manage/deploy/self-managed/configure-elasticsearch.md index a9563c97fe..b067865c69 100644 --- a/deploy-manage/deploy/self-managed/configure-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/configure-elasticsearch.md @@ -14,7 +14,7 @@ The configuration files should contain settings which are node-specific (such as ## Available settings -For a complete list of settings that you can apply to your {{es}} cluster, refer to the [Elasticsearch configuration reference](elasticsearch://reference/elasticsearch/configuration-reference.md). +For a complete list of settings that you can apply to your {{es}} cluster, refer to the [Elasticsearch configuration reference](elasticsearch://reference/elasticsearch/configuration-reference/index.md). For a list of settings that must be configured before using your cluster in production, refer to [](/deploy-manage/deploy/self-managed/important-settings-configuration.md). 
diff --git a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md index 90e72d800d..e264d709e2 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md @@ -108,7 +108,7 @@ This is very convenient because you don’t have to create any directories to st | --- | --- | --- | --- | | home | {{kib}} home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | | bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | -| config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | +| config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure-kibana.md)` | | data | The location of the data files written to disk by {{kib}} and its plugins | `$KIBANA_HOME\data` | | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. 
| `$KIBANA_HOME\plugins` | | diff --git a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md index 471835c2e5..98e0d93e04 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md @@ -67,7 +67,7 @@ This is very convenient because you don’t have to create any directories to st | --- | --- | --- | --- | | home | {{kib}} home directory or `$KIBANA_HOME` | Directory created by unpacking the archive | | | bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `$KIBANA_HOME\bin` | | -| config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure.md)` | +| config | Configuration files including `kibana.yml` | `$KIBANA_HOME\config` | `[KBN_PATH_CONF](configure-kibana.md)` | | | data | `The location of the data files written to disk by {{kib}} and its plugins` | `$KIBANA_HOME\data` | | | plugins | `Plugin files location. 
Each plugin will be contained in a subdirectory.` | `$KIBANA_HOME\plugins` | diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md index ca51d5fa18..3f00b15fcd 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md @@ -139,7 +139,7 @@ The Debian package places config files, logs, and the data directory in the appr | --- | --- | --- | --- | | home | {{kib}} home directory or `$KIBANA_HOME` | `/usr/share/kibana` | | | bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `/usr/share/kibana/bin` | | -| config | Configuration files including `kibana.yml` | `/etc/kibana` | `[KBN_PATH_CONF](configure.md)` | +| config | Configuration files including `kibana.yml` | `/etc/kibana` | `[KBN_PATH_CONF](configure-kibana.md)` | | data | The location of the data files written to disk by {{kib}} and its plugins | `/var/lib/kibana` | `path.data` | | logs | Logs files location | `/var/log/kibana` | `[Logging configuration](../../monitor/logging-configuration/kibana-logging.md)` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/kibana/plugins` | | diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-docker.md b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md index 79f789e750..17c5375be7 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md @@ -148,7 +148,7 @@ docker rm kib01 ## Configure {{kib}} on Docker [configuring-kibana-docker] -The Docker images provide several methods for configuring {{kib}}. 
The conventional approach is to provide a `kibana.yml` file as described in [Configuring Kibana](configure.md), but it’s also possible to use environment variables to define settings. +The Docker images provide several methods for configuring {{kib}}. The conventional approach is to provide a `kibana.yml` file as described in [](configure-kibana.md), but it’s also possible to use environment variables to define settings. ### Bind-mounted configuration [bind-mount-config] diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md index 84115f798e..367324e3e2 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md @@ -15,7 +15,7 @@ applies_to: The RPM for {{kib}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install {{kib}} on any RPM-based system such as OpenSuSE, SLES, Red Hat, and Oracle Enterprise. ::::{note} -RPM install is not supported on distributions with old versions of RPM, such as SLES 11. Refer to [Install from archive on Linux or macOS](install-from-archive-on-linux-macos.md) instead. +RPM install is not supported on distributions with old versions of RPM, such as SLES 11. Refer to [Install from archive on Linux or macOS](install-kibana-from-archive-on-linux-macos.md) instead. 
:::: :::{include} _snippets/trial.md @@ -130,7 +130,7 @@ The RPM places config files, logs, and the data directory in the appropriate loc | --- | --- | --- | --- | | home | {{kib}} home directory or `$KIBANA_HOME` | `/usr/share/kibana` | | | bin | Binary scripts including `kibana` to start the {{kib}} server and `kibana-plugin` to install plugins | `/usr/share/kibana/bin` | | -| config | Configuration files including `kibana.yml` | `/etc/kibana` | `[KBN_PATH_CONF](configure.md)` | +| config | Configuration files including `kibana.yml` | `/etc/kibana` | `[KBN_PATH_CONF](configure-kibana.md)` | | data | The location of the data files written to disk by {{kib}} and its plugins | `/var/lib/kibana` | `path.data` | | logs | Logs files location | `/var/log/kibana` | `[Logging configuration](../../monitor/logging-configuration/kibana-logging.md)` | | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | `/usr/share/kibana/plugins` | | diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index 680d00ca45..7d3dc5edab 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -33,8 +33,8 @@ To support certain older Linux platforms (most notably CentOS7/RHEL7), {{kib}} f | Format | Description | Instructions | | --- | --- | --- | -| `tar.gz` | The `tar.gz` packages are provided for installation on Linux and Darwin and are the easiest choice for getting started with {{kib}}. | [Install from archive on Linux or macOS](/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md)| -| `zip` | The `zip` package is the only supported package for Windows.| [Install on Windows](/deploy-manage/deploy/self-managed/install-on-windows.md)| +| `tar.gz` | The `tar.gz` packages are provided for installation on Linux and Darwin and are the easiest choice for getting started with {{kib}}. 
| [Install from archive on Linux or macOS](/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md)| +| `zip` | The `zip` package is the only supported package for Windows.| [Install on Windows](/deploy-manage/deploy/self-managed/install-kibana-on-windows.md)| | `deb` | The `deb` package is suitable for Debian, Ubuntu, and other Debian-based systems. Debian packages may be downloaded from the Elastic website or from our Debian repository. | [Install with Debian package](/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md) | | `rpm` | The `rpm` package is suitable for installation on Red Hat, SLES, OpenSuSE and other RPM-based systems. RPMs may be downloaded from the Elastic website or from our RPM repository. | [Install with RPM](/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md) | | `docker` | Images are available for running {{kib}} as a Docker container. They may be downloaded from the Elastic Docker Registry. | [Running {{kib}} on Docker](/deploy-manage/deploy/self-managed/install-kibana-with-docker.md) | diff --git a/deploy-manage/deploy/self-managed/tools-apis.md b/deploy-manage/deploy/self-managed/tools-apis.md index 095e875e77..f6b083e61d 100644 --- a/deploy-manage/deploy/self-managed/tools-apis.md +++ b/deploy-manage/deploy/self-managed/tools-apis.md @@ -12,7 +12,7 @@ Review key resources that can be used to interact with and manage self-managed c * [{{es}} API](https://www.elastic.co/docs/api/doc/elasticsearch/): The core API for interacting with a self-managed {{es}} cluster, or any cluster running {{stack}}. Configure {{es}} functionality and settings, query your data, and more. - Refer to [REST APIs](/reference/elasticsearch/rest-apis.md) to learn about API conventions and view API usage examples. + Refer to [REST APIs](elasticsearch://reference/elasticsearch/rest-apis/index.md) to learn about API conventions and view API usage examples. 
* [{{kib}} API](https://www.elastic.co/docs/api/doc/kibana/): manage {{kib}} resources such as connectors, data views, and saved objects. :::{tip} @@ -25,5 +25,5 @@ Learn how to [generate API keys for your self-managed cluster](/deploy-manage/ap ## Other -* [{{es}} command line tools](elasticsearch://reference/elasticsearch/command-line-tools.md): Utilities for configuring security and performing other tasks from the command line. -* [Plugins](elasticsearch://reference/elasticsearch/elasticsearch-plugins.md): Plugins extend core {{es}} functionality. Choose from an existing plugin, or [build your own](elasticsearch://extend/index.md). \ No newline at end of file +* [{{es}} command line tools](elasticsearch://reference/elasticsearch/command-line-tools/index.md): Utilities for configuring security and performing other tasks from the command line. +* [Plugins](elasticsearch://reference/elasticsearch/elasticsearch-plugins/index.md): Plugins extend core {{es}} functionality. Choose from an existing plugin, or [build your own](elasticsearch://extend/index.md). \ No newline at end of file diff --git a/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md b/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md index 0524232432..5e61f818c1 100644 --- a/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md +++ b/deploy-manage/security/manually-configure-security-in-self-managed-cluster.md @@ -87,7 +87,7 @@ When you start {{es}} for the first time, the node startup process tries to auto * Whether security is already configured * If the startup process can modify the node configuration -If any of those checks fail, there’s a good indication that you [manually configured security](../../../deploy-manage/security/manually-configure-security-in-self-managed-cluster.md), or don’t want security to be configured automatically. In these cases, the node starts normally using the existing configuration. 
+If any of those checks fail, there’s a good indication that you manually configured security, or don’t want security to be configured automatically. In these cases, the node starts normally using the existing configuration. ::::{important} If you redirect {{es}} output to a file, security autoconfiguration is skipped. Autoconfigured credentials can only be viewed on the terminal the first time you start {{es}}. If you need to redirect output to a file, start {{es}} without redirection the first time and use redirection on all subsequent starts. diff --git a/deploy-manage/security/security-certificates-keys.md b/deploy-manage/security/security-certificates-keys.md index 6d41517fff..5626f1ca6d 100644 --- a/deploy-manage/security/security-certificates-keys.md +++ b/deploy-manage/security/security-certificates-keys.md @@ -15,7 +15,7 @@ When you start {{es}} for the first time, the following security configuration o You can then start {{kib}} and enter the enrollment token, which is valid for 30 minutes. This token automatically applies the security settings from your {{es}} cluster, authenticates to {{es}} with the built-in `kibana` service account, and writes the security configuration to `kibana.yml`. ::::{note} -There are [some cases](../deploy/self-managed/installing-elasticsearch.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. +There are [some cases](/deploy-manage/security/security-certificates-keys.md#stack-skip-auto-configuration) where security can’t be configured automatically because the node startup process detects that the node is already part of a cluster, or that security is already configured or explicitly disabled. 
:::: diff --git a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md index ff655739db..b2a48b2aec 100644 --- a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md +++ b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md @@ -324,7 +324,7 @@ Any index or data stream that’s part of the feature state will display in a sn ## Dedicated cluster state snapshots [cluster-state-snapshots] -Some feature states contain sensitive data. For example, the `security` feature state includes system indices that may contain user names and encrypted password hashes. Because passwords are stored using [cryptographic hashes](../../deploy/self-managed/configure-elasticsearch.md#hashing-settings), the disclosure of a snapshot would not automatically enable a third party to authenticate as one of your users or use API keys. However, it would disclose confidential information, and if a third party can modify snapshots, they could install a back door. +Some feature states contain sensitive data. For example, the `security` feature state includes system indices that may contain user names and encrypted password hashes. Because passwords are stored using [cryptographic hashes](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md#hashing-settings), the disclosure of a snapshot would not automatically enable a third party to authenticate as one of your users or use API keys. However, it would disclose confidential information, and if a third party can modify snapshots, they could install a back door. To better protect this data, consider creating a dedicated repository and {{slm-init}} policy for snapshots of the cluster state. This lets you strictly limit and audit access to the repository.
diff --git a/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md b/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md index cda64f7fa5..f0bf290b34 100644 --- a/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md +++ b/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md @@ -126,7 +126,7 @@ xpack.searchable.snapshot.shared_cache.size: 4TB ``` ::::{important} -You can only configure these settings on nodes with the [`data_frozen`](../../distributed-architecture/clusters-nodes-shards/node-roles.md#data-frozen-node) role. Additionally, nodes with a shared cache can only have a single [data path](../../deploy/self-managed/configure-elasticsearch.md#path-settings). +You can only configure these settings on nodes with the [`data_frozen`](../../distributed-architecture/clusters-nodes-shards/node-roles.md#data-frozen-node) role. Additionally, nodes with a shared cache can only have a single [data path](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings). :::: diff --git a/explore-analyze/find-and-organize/saved-objects.md b/explore-analyze/find-and-organize/saved-objects.md index 9793603557..495ffd8979 100644 --- a/explore-analyze/find-and-organize/saved-objects.md +++ b/explore-analyze/find-and-organize/saved-objects.md @@ -80,7 +80,7 @@ Import multiple objects in a single operation. 4. Click **Import**. ::::{note} -The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedobjects-maximportexportsize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](kibana://reference/configuration-reference/general-settings.md#savedObjects-maximportpayloadbytes) setting limits the overall size of the file that you can import. 
+The [`savedObjects.maxImportExportSize`](kibana://reference/configuration-reference/general-settings.md#savedobjects-maximportexportsize) configuration setting limits the number of saved objects to include in the file. The [`savedObjects.maxImportPayloadBytes`](kibana://reference/configuration-reference/general-settings.md#savedobjects-maximportpayloadbytes) setting limits the overall size of the file that you can import. :::: diff --git a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md index 3a9ecd5563..349c4d0612 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md +++ b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md @@ -194,7 +194,7 @@ Before moving ahead to configure additional {{es}} nodes, you’ll need to updat ## Step 4: Set up a second {{es}} node [install-stack-self-elasticsearch-second] -To set up a second {{es}} node, the initial steps are similar to those that you followed for [Step 1: Set up the first {{es}} node](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elasticsearch-first). +To set up a second {{es}} node, the initial steps are similar to those that you followed for [Step 1: Set up the first {{es}} node](#install-stack-self-elasticsearch-first). 1. Log in to the host where you’d like to set up your second {{es}} instance. 2. Create a working directory for the installation package: @@ -361,7 +361,7 @@ To set up a second {{es}} node, the initial steps are similar to those that you ## Step 5: Set up additional {{es}} nodes [install-stack-self-elasticsearch-third] -To set up your next {{es}} node, follow exactly the same steps as you did previously in [Step 4: Set up a second {{es}} node](../../../deploy-manage/deploy/self-managed/installing-elasticsearch.md#install-stack-self-elasticsearch-second). 
The process is identical for each additional {{es}} node that you would like to add to the cluster. As a recommended best practice, create a new enrollment token for each new node that you add. +To set up your next {{es}} node, follow exactly the same steps as you did previously in [Step 4: Set up a second {{es}} node](#install-stack-self-elasticsearch-second). The process is identical for each additional {{es}} node that you would like to add to the cluster. As a recommended best practice, create a new enrollment token for each new node that you add. ## Step 6: Install {{kib}} [install-stack-self-kibana] From 3ccb9f75798c6c8b43cc34278d93a94fc5f1a529 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 17:52:28 -0400 Subject: [PATCH 32/43] more fixes --- deploy-manage/deploy/self-managed.md | 2 +- deploy-manage/deploy/self-managed/tools-apis.md | 2 +- .../clusters-nodes-shards/node-roles.md | 2 +- .../maintenance/add-and-remove-elasticsearch-nodes.md | 2 +- deploy-manage/tools/snapshot-and-restore/create-snapshots.md | 2 +- deploy-manage/users-roles.md | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 5544042054..79d51f4c2f 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -42,7 +42,7 @@ Learn how to install and configure {{es}}. {{es}} is the distributed search and * [](/deploy-manage/deploy/self-managed/installing-elasticsearch.md) * [](/deploy-manage/deploy/self-managed/important-system-configuration.md): Prepare your environment for an {{es}} installation. - * [](/deploy-manage/deploy/self-managed/deploy-cluster.md#installation-methods): Install and run {{es}} using one of our install packages or container images. + * [](/deploy-manage/deploy/self-managed/installing-elasticsearch.md#installation-methods): Install and run {{es}} using one of our install packages or container images. 
* [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md): Quickly set up {{es}} and {{kib}} in Docker for local development or testing. * [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md): Learn how to make configuration changes to {{es}} * [](/deploy-manage/deploy/self-managed/important-settings-configuration.md): Learn about key settings required for production environments. diff --git a/deploy-manage/deploy/self-managed/tools-apis.md b/deploy-manage/deploy/self-managed/tools-apis.md index f6b083e61d..3c03b63156 100644 --- a/deploy-manage/deploy/self-managed/tools-apis.md +++ b/deploy-manage/deploy/self-managed/tools-apis.md @@ -26,4 +26,4 @@ Learn how to [generate API keys for your self-managed cluster](/deploy-manage/ap ## Other * [{{es}} command line tools](elasticsearch://reference/elasticsearch/command-line-tools/index.md): Utilities for configuring security and performing other tasks from the command line. -* [Plugins](elasticsearch://reference/elasticsearch/elasticsearch-plugins/index.md): Plugins extend core {{es}} functionality. Choose from an existing plugin, or [build your own](elasticsearch://extend/index.md). \ No newline at end of file +* [Plugins](elasticsearch://reference/elasticsearch-plugins/index.md): Plugins extend core {{es}} functionality. Choose from an existing plugin, or [build your own](elasticsearch://extend/index.md). 
\ No newline at end of file diff --git a/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md b/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md index c3a2178a90..8f759378b9 100644 --- a/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md +++ b/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md @@ -160,7 +160,7 @@ In a multi-tier deployment architecture, you use specialized data roles to assig If you want to include a node in all tiers, or if your cluster does not use multiple tiers, then you can use the generic `data` role. -[Cluster shard limits](../../deploy/self-managed/configure-elasticsearch.md#cluster-shard-limit) prevent creation of more than 1000 non-frozen shards per node, and 3000 frozen shards per dedicated frozen node. Make sure you have enough nodes of each type in your cluster to handle the number of shards you need. +[Cluster shard limits](elasticsearch://reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-shard-limit) prevent creation of more than 1000 non-frozen shards per node, and 3000 frozen shards per dedicated frozen node. Make sure you have enough nodes of each type in your cluster to handle the number of shards you need. ::::{warning} If you assign a node to a specific tier using a specialized data role, then you shouldn’t also assign it the generic `data` role. The generic `data` role takes precedence over specialized data roles. 
diff --git a/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md b/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md index a618d51dad..d515ecd7ab 100644 --- a/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md +++ b/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md @@ -43,7 +43,7 @@ To add a node to a cluster running on multiple machines, you must also set [`dis :::: -:::{include} deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md +:::{include} /deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md ::: ## Master-eligible nodes [add-elasticsearch-nodes-master-eligible] diff --git a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md index b2a48b2aec..1aed2270d1 100644 --- a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md +++ b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md @@ -324,7 +324,7 @@ Any index or data stream that’s part of the feature state will display in a sn ## Dedicated cluster state snapshots [cluster-state-snapshots] -Some feature states contain sensitive data. For example, the `security` feature state includes system indices that may contain user names and encrypted password hashes. Because passwords are stored using [cryptographic hashes](elasticsearch://reference/elasticsearch/configuration-reference/security-settings#hashing-settings), the disclosure of a snapshot would not automatically enable a third party to authenticate as one of your users or use API keys. However, it would disclose confidential information, and if a third party can modify snapshots, they could install a back door. +Some feature states contain sensitive data. For example, the `security` feature state includes system indices that may contain user names and encrypted password hashes. 
Because passwords are stored using [cryptographic hashes](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md#hashing-settings), the disclosure of a snapshot would not automatically enable a third party to authenticate as one of your users or use API keys. However, it would disclose confidential information, and if a third party can modify snapshots, they could install a back door. To better protect this data, consider creating a dedicated repository and {{slm-init}} policy for snapshots of the cluster state. This lets you strictly limit and audit access to the repository. diff --git a/deploy-manage/users-roles.md b/deploy-manage/users-roles.md index bebea33dfe..edf6d170df 100644 --- a/deploy-manage/users-roles.md +++ b/deploy-manage/users-roles.md @@ -33,7 +33,7 @@ Preventing unauthorized access is only one element of a complete security strate ```{applies_to} deployment: ess: all -:serverless: all +serverless: all ``` If you’re using {{ecloud}}, then you can perform the following tasks to control access to your Cloud organization, your Cloud Hosted deployments, and your Cloud Serverless projects: @@ -81,7 +81,7 @@ You can't manage users and roles for {{eck}} clusters at the orchestrator level. ## Project level ```{applies_to} -:serverless: all +serverless: all ``` As an extension of the [predefined instance access roles](/deploy-manage/users-roles/cloud-organization/user-roles.md#ec_instance_access_roles) offered for {{serverless-short}} projects, you can create custom roles at the project level to provide more granular control, and provide users with only the access they need within specific projects. 
From cd024e91c4b0b65cddc4b87dc31766b911051d26 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Mon, 10 Mar 2025 17:56:42 -0400 Subject: [PATCH 33/43] syntax fix --- deploy-manage/users-roles.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-manage/users-roles.md b/deploy-manage/users-roles.md index edf6d170df..496d7d7927 100644 --- a/deploy-manage/users-roles.md +++ b/deploy-manage/users-roles.md @@ -55,7 +55,7 @@ For {{ech}} deployments, you can configure SSO at the organization level, the de ```{applies_to} deployment: ece: all -::: +``` Control access to your {{ece}} [orchestrator](/deploy-manage/deploy/cloud-enterprise/deploy-an-orchestrator.md) and deployments. From 95cc1629eaafe59bb71cb916ecd5abccd10e825a Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Tue, 11 Mar 2025 10:16:14 -0400 Subject: [PATCH 34/43] Update deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md --- .../deploy/elastic-cloud/restrictions-known-problems.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md index 19f484aea1..d055d7c32a 100644 --- a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md +++ b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md @@ -55,7 +55,7 @@ Elasticsearch APIs $$$ec-restrictions-apis-kibana$$$ Kibana APIs -: There are no rate limits restricting your use of the Kibana APIs. However, Kibana features are affected by the [Kibana configuration settings](/deploy-manage/deploy/self-managed/configure-kibana.md), not all of which are supported in {{ecloud}}. For a list of what settings are currently supported, check [Add Kibana user settings](edit-stack-settings.md). 
For all details about using the Kibana APIs, check the [Kibana API reference documentation](https://www.elastic.co/guide/en/kibana/current/api.html). +: There are no rate limits restricting your use of the Kibana APIs. However, Kibana features are affected by the [Kibana configuration settings](kibana://reference/configuration-reference.md), not all of which are supported in {{ecloud}}. For a list of what settings are currently supported, check [Add Kibana user settings](edit-stack-settings.md). For all details about using the Kibana APIs, check the [Kibana API reference documentation](https://www.elastic.co/docs/api/doc/kibana/). ## Transport client [ec-restrictions-transport-client] From 1a5e88d7cb3341b46a9a7f1768f9e91213a34bf7 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Tue, 11 Mar 2025 14:09:23 -0400 Subject: [PATCH 35/43] too many hashes --- .../cloud/cloud-heroku/ech-manage-kibana-settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md b/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md index 7000c39a7b..059c643555 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md @@ -112,7 +112,7 @@ If a setting is not supported by Elasticsearch Add-On for Heroku, you will get a : The maximum payload size in bytes for incoming server requests. Default: 1048576. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-maxpayload). `server.securityResponseHeaders.strictTransportSecurity` -: Controls whether the [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header is used in all responses to the client from the Kibana server. 
To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md##server-securityresponseheaders-stricttransportsecurity). +: Controls whether the [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-stricttransportsecurity). `server.securityResponseHeaders.xContentTypeOptions` : Controls whether the [`X-Content-Type-Options`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options) header is used in all responses to the client from the Kibana server. To learn more, check [Configure Kibana](kibana://reference/configuration-reference/general-settings.md#server-securityresponseheaders-xcontenttypeoptions). From 089e652d370e648c14d7e670ae7a76165259d686 Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Thu, 13 Mar 2025 12:32:05 -0400 Subject: [PATCH 36/43] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Edu González de la Herrán <25320357+eedugon@users.noreply.github.com> --- deploy-manage/deploy/self-managed.md | 6 +++--- .../deploy/self-managed/_snippets/cmd-line-config.md | 3 +-- deploy-manage/deploy/self-managed/air-gapped-install.md | 4 ++-- .../self-managed/install-elasticsearch-docker-prod.md | 4 ++-- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 79d51f4c2f..02db227372 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -15,7 +15,7 @@ If you want to install Elastic on your own premises without the assistance of an To quickly set up {{es}} and {{kib}} in Docker 
for local development or testing, jump to [](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). :::{admonition} Simplify the deployment process -Self-managed clusters are useful for local development, and for exploring Elastic features. However, Elastic offer several deployment options that can simplify the process of deploying and managing multi-node deployments, especially in production. They also allow you to deploy and manage multiple deployments from a single surface. +Self-managed clusters are useful for local development, and for exploring Elastic features. However, Elastic offers several deployment options that can simplify the process of deploying and managing multi-node deployments, especially in production. They also allow you to deploy and manage multiple deployments from a single surface. Managed by Elastic: * [{{serverless-full}}](/deploy-manage/deploy/elastic-cloud/serverless.md) @@ -54,7 +54,7 @@ Learn how to install and configure {{es}}. {{es}} is the distributed search and ### Deploying Kibana -After you deploy {{es}}, you can install {{kib}}. {{kib}} provides the user interface for all Elastic solutions. It’s a powerful tool for visualizing and analyzing your data, and for managing and monitoring the {{stack}}. Although {{kib}} is not required to use {{es}}, it's required for most use cases. +After you deploy {{es}}, you can install {{kib}}. {{kib}} provides the user interface for all Elastic solutions. It’s a powerful tool for [visualizing and analyzing](/explore-analyze/index.md) your data, and for managing and monitoring the {{stack}}. Although {{kib}} is not required to use {{es}}, it's required for most [use cases](/solutions/index.md). * [](/deploy-manage/deploy/self-managed/install-kibana.md): Install {{kib}} using one of our install packages or container images, and enroll {{kib}} with your {{es}} cluster. 
* [](/deploy-manage/deploy/self-managed/configure-kibana.md): Learn how to make configuration changes to {{kib}}. @@ -81,7 +81,7 @@ Review these other sections for critical information about securing and managing Learn how to secure your Elastic environment to restrict access to only authorized parties, and allow communication between your environment and external parties. * [](/deploy-manage/security.md): Learn about security features that prevent bad actors from tampering with your data, and encrypt communications to, from, and within your cluster. -* [](/deploy-manage/users-roles/cluster-or-deployment-auth.md): Set up authentication and authorization for your cluster, and learn about the underlying security technologies that {{es}} uses to authenticate and authorize requests internally and across services. +* [Users and roles](/deploy-manage/users-roles/cluster-or-deployment-auth.md): Set up authentication and authorization for your cluster, and learn about the underlying security technologies that {{es}} uses to authenticate and authorize requests internally and across services. * [](/deploy-manage/manage-spaces.md): Learn how to organize content in {{kib}}, and restrict access to this content to specific users. * [](/deploy-manage/api-keys.md): Authenticate and authorize programmatic access to your deployments and {{es}} resources. * [](/deploy-manage/manage-connectors.md): Manage connection information between Elastic and third-party systems. 
diff --git a/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md b/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md index 7fa3ebbb9b..fcbf59f5dd 100644 --- a/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md +++ b/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md @@ -1,8 +1,7 @@ {{es}} loads its configuration from the following location by default: -``` +```sh subs=true {{es-conf}}{{slash}}elasticsearch.yml -``` The format of this config file is explained in [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). diff --git a/deploy-manage/deploy/self-managed/air-gapped-install.md b/deploy-manage/deploy/self-managed/air-gapped-install.md index 8465c8c778..f41118fbb0 100644 --- a/deploy-manage/deploy/self-managed/air-gapped-install.md +++ b/deploy-manage/deploy/self-managed/air-gapped-install.md @@ -40,12 +40,12 @@ Specifically: ## {{beats}} [air-gapped-beats] -Elastic {{beats}} are light-weight data shippers. They do not require any unique setup in the air-gapped scenario. To learn more, refer to the [{{beats}} documentation](asciidocalypse://docs/beats/docs/reference/index.md). +Elastic {{beats}} are light-weight data shippers. They do not require any special configuration in air-gapped environments. To learn more, refer to the [{{beats}} documentation](asciidocalypse://docs/beats/docs/reference/index.md). ## {{ls}} [air-gapped-logstash] -{{ls}} is a versatile data shipping and processing application. It does not require any unique setup in the air-gapped scenario. To learn more, refer to the [{{ls}} documentation](asciidocalypse://docs/logstash/docs/reference/index.md). +{{ls}} is a versatile data shipping and processing application. It does not require any special configuration in air-gapped environments. To learn more, refer to the [{{ls}} documentation](asciidocalypse://docs/logstash/docs/reference/index.md). 
## {{agent}} [air-gapped-elastic-agent] diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md index 383da82141..58c9d19140 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md @@ -186,9 +186,9 @@ docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ## Always bind data volumes [_always_bind_data_volumes] -You should use a volume bound on `/usr/share/elasticsearch/data` for the following reasons: +You should use a volume bound to `/usr/share/elasticsearch/data` for the following reasons: -1. The data of your {{es}} node won’t be lost if the container is killed. +1. The data of your {{es}} node won’t be lost if the container is deleted. 2. {{es}} is I/O sensitive and the Docker storage driver is not ideal for fast I/O. 3. It allows the use of advanced [Docker volume plugins](https://docs.docker.com/engine/extend/plugins/#volume-plugins). From 9ef75baf422bc881e401f01d47dd8ee2efadffd7 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Mar 2025 14:52:42 -0400 Subject: [PATCH 37/43] add kibana command line tools --- deploy-manage/deploy/self-managed/tools-apis.md | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy-manage/deploy/self-managed/tools-apis.md b/deploy-manage/deploy/self-managed/tools-apis.md index 3c03b63156..08c13abeed 100644 --- a/deploy-manage/deploy/self-managed/tools-apis.md +++ b/deploy-manage/deploy/self-managed/tools-apis.md @@ -26,4 +26,5 @@ Learn how to [generate API keys for your self-managed cluster](/deploy-manage/ap ## Other * [{{es}} command line tools](elasticsearch://reference/elasticsearch/command-line-tools/index.md): Utilities for configuring security and performing other tasks from the command line. 
+* [{{kib}} command line tools](kibana://reference/commands.md): Utilities for performing security and connectivity related tasks for {{kib}} from the command line. * [Plugins](elasticsearch://reference/elasticsearch-plugins/index.md): Plugins extend core {{es}} functionality. Choose from an existing plugin, or [build your own](elasticsearch://extend/index.md). \ No newline at end of file From 80b83904b292313c5af2ce167981526ecd25d07a Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Mar 2025 14:53:02 -0400 Subject: [PATCH 38/43] changes related to subs=true --- .../install-using-yaml-manifest-quickstart.md | 4 ++-- .../k8s-openshift-deploy-operator.md | 2 +- .../cloud-on-k8s/k8s-service-mesh-istio.md | 2 +- .../cloud-on-k8s/k8s-service-mesh-linkerd.md | 2 +- deploy-manage/deploy/self-managed.md | 2 -- .../deploy/self-managed/_snippets/ca-cert.md | 2 +- .../self-managed/_snippets/ca-fingerprint.md | 2 +- .../_snippets/check-es-running.md | 2 +- .../self-managed/_snippets/cmd-line-config.md | 5 +++-- .../self-managed/_snippets/connect-clients.md | 2 +- .../self-managed/_snippets/enroll-nodes.md | 6 ++--- .../self-managed/_snippets/pw-env-var.md | 2 +- .../deploy/self-managed/_snippets/wolfi.md | 2 +- .../deploy/self-managed/access-kibana.md | 6 ++++- .../deploy/self-managed/air-gapped-install.md | 10 ++++----- .../install-elasticsearch-docker-basic.md | 22 +++++++++---------- .../install-elasticsearch-docker-compose.md | 6 ++--- .../install-elasticsearch-docker-configure.md | 6 ++--- .../install-elasticsearch-docker-prod.md | 6 ++--- ...asticsearch-from-archive-on-linux-macos.md | 6 ++--- ...stall-elasticsearch-with-debian-package.md | 2 +- .../install-elasticsearch-with-rpm.md | 2 +- ...stall-elasticsearch-with-zip-on-windows.md | 12 +++++----- ...tall-kibana-from-archive-on-linux-macos.md | 6 ++--- .../self-managed/install-kibana-on-windows.md | 2 +- .../install-kibana-with-debian-package.md | 2 +- .../install-kibana-with-docker.md | 22 +++++++++---------- 
.../self-managed/install-kibana-with-rpm.md | 2 +- .../start-stop-elasticsearch.md | 4 ++-- docset.yml | 4 ++++ get-started/the-stack.md | 2 -- .../observability/apps/apm-server-binary.md | 8 +++---- .../logs/ecs-formatted-application-logs.md | 8 +++---- .../logs/plaintext-application-logs.md | 10 ++++----- 34 files changed, 91 insertions(+), 92 deletions(-) diff --git a/deploy-manage/deploy/cloud-on-k8s/install-using-yaml-manifest-quickstart.md b/deploy-manage/deploy/cloud-on-k8s/install-using-yaml-manifest-quickstart.md index 3b8c740e3b..798737caf6 100644 --- a/deploy-manage/deploy/cloud-on-k8s/install-using-yaml-manifest-quickstart.md +++ b/deploy-manage/deploy/cloud-on-k8s/install-using-yaml-manifest-quickstart.md @@ -39,7 +39,7 @@ To deploy the ECK operator: 1. Install Elastic's [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) with [`create`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/): - ```sh + ```sh subs=true kubectl create -f https://download.elastic.co/downloads/eck/{{eck_version}}/crds.yaml ``` @@ -58,7 +58,7 @@ To deploy the ECK operator: 2. Using [`kubectl apply`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_apply/), install the operator with its RBAC rules: - ```sh + ```sh subs=true kubectl apply -f https://download.elastic.co/downloads/eck/{{eck_version}}/operator.yaml ``` diff --git a/deploy-manage/deploy/cloud-on-k8s/k8s-openshift-deploy-operator.md b/deploy-manage/deploy/cloud-on-k8s/k8s-openshift-deploy-operator.md index 70482bb90e..dde2d316e5 100644 --- a/deploy-manage/deploy/cloud-on-k8s/k8s-openshift-deploy-operator.md +++ b/deploy-manage/deploy/cloud-on-k8s/k8s-openshift-deploy-operator.md @@ -12,7 +12,7 @@ This page shows the installation steps to deploy ECK in Openshift: 1. 
Apply the manifests the same way as described in [](./install-using-yaml-manifest-quickstart.md) document: - ```shell + ```shell subs=true oc create -f https://download.elastic.co/downloads/eck/{{eck_version}}/crds.yaml oc apply -f https://download.elastic.co/downloads/eck/{{eck_version}}/operator.yaml ``` diff --git a/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-istio.md b/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-istio.md index fbfe35bc6d..beafec1b75 100644 --- a/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-istio.md +++ b/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-istio.md @@ -35,7 +35,7 @@ The operator itself must be connected to the service mesh to deploy and manage E 2. Install ECK: - ```sh + ```sh subs=true kubectl create -f https://download.elastic.co/downloads/eck/{{eck_version}}/crds.yaml kubectl apply -f https://download.elastic.co/downloads/eck/{{eck_version}}/operator.yaml ``` diff --git a/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-linkerd.md b/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-linkerd.md index a60e034f1a..820daad1c0 100644 --- a/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-linkerd.md +++ b/deploy-manage/deploy/cloud-on-k8s/k8s-service-mesh-linkerd.md @@ -19,7 +19,7 @@ These instructions have been tested with Linkerd 2.7.0. In order to connect the operator to the service mesh, Linkerd sidecar must be injected into the ECK deployment. 
This can be done during installation as follows: -```sh +```sh subs=true kubectl create -f https://download.elastic.co/downloads/eck/{{eck_version}}/crds.yaml linkerd inject https://download.elastic.co/downloads/eck/{{eck_version}}/operator.yaml | kubectl apply -f - ``` diff --git a/deploy-manage/deploy/self-managed.md b/deploy-manage/deploy/self-managed.md index 02db227372..52bd95d4a5 100644 --- a/deploy-manage/deploy/self-managed.md +++ b/deploy-manage/deploy/self-managed.md @@ -1,8 +1,6 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/dependencies-versions.html -sub: - stack-version: "9.0.0" applies_to: deployment: self: diff --git a/deploy-manage/deploy/self-managed/_snippets/ca-cert.md b/deploy-manage/deploy/self-managed/_snippets/ca-cert.md index f1636a8139..2129c05d98 100644 --- a/deploy-manage/deploy/self-managed/_snippets/ca-cert.md +++ b/deploy-manage/deploy/self-managed/_snippets/ca-cert.md @@ -1,6 +1,6 @@ If your library doesn’t support a method of validating the fingerprint, the auto-generated CA certificate is created in the following directory on each {{es}} node: -```sh +```sh subs=true {{es-conf}}{{slash}}certs{{slash}}http_ca.crt ``` diff --git a/deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md b/deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md index 413d05829d..6b5127f5f2 100644 --- a/deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md +++ b/deploy-manage/deploy/self-managed/_snippets/ca-fingerprint.md @@ -9,6 +9,6 @@ openssl x509 -fingerprint -sha256 -in config/certs/http_ca.crt The command returns the security certificate, including the fingerprint. The `issuer` should be `{{es}} security auto-configuration HTTP CA`. 
```sh -issuer= /CN={{es}} security auto-configuration HTTP CA +issuer= /CN=Elasticsearch security auto-configuration HTTP CA SHA256 Fingerprint= ``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/_snippets/check-es-running.md b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md index 8b49e3484b..8ad6e585ad 100644 --- a/deploy-manage/deploy/self-managed/_snippets/check-es-running.md +++ b/deploy-manage/deploy/self-managed/_snippets/check-es-running.md @@ -1,6 +1,6 @@ You can test that your {{es}} node is running by sending an HTTPS request to port `9200` on `localhost`: -```sh +```sh subs=true curl --cacert {{es-conf}}{{slash}}certs{{slash}}http_ca.crt {{escape}} <1> -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <2> ``` diff --git a/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md b/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md index fcbf59f5dd..9b96f2bd8c 100644 --- a/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md +++ b/deploy-manage/deploy/self-managed/_snippets/cmd-line-config.md @@ -2,13 +2,14 @@ ```sh subs=true {{es-conf}}{{slash}}elasticsearch.yml +``` The format of this config file is explained in [](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). 
Any settings that can be specified in the config file can also be specified on the command line, using the `-E` syntax as follows: -```sh -.\bin\elasticsearch.bat -Ecluster.name=my_cluster -Enode.name=node_1 +```sh subs=true +.{{slash}}bin{{slash}}elasticsearch{{auto}} -Ecluster.name=my_cluster -Enode.name=node_1 ``` :::{note} diff --git a/deploy-manage/deploy/self-managed/_snippets/connect-clients.md b/deploy-manage/deploy/self-managed/_snippets/connect-clients.md index 9b13dd80b9..10e0dc187e 100644 --- a/deploy-manage/deploy/self-managed/_snippets/connect-clients.md +++ b/deploy-manage/deploy/self-managed/_snippets/connect-clients.md @@ -3,7 +3,7 @@ When you start {{es}} for the first time, TLS is configured automatically for the HTTP layer. A CA certificate is generated and stored on disk at: -```sh +```sh subs=true {{es-conf}}{{slash}}certs{{slash}}http_ca.crt ``` diff --git a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md index 00abb09d51..f120b9049b 100644 --- a/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md +++ b/deploy-manage/deploy/self-managed/_snippets/enroll-nodes.md @@ -8,7 +8,7 @@ To enroll new nodes in your cluster, create an enrollment token with the `elasti 1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](elasticsearch://reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. - ```sh + ```sh subs=true bin{{slash}}elasticsearch-create-enrollment-token -s node ``` @@ -16,13 +16,13 @@ To enroll new nodes in your cluster, create an enrollment token with the `elasti 2. From the installation directory of your new node, start {{es}} and pass the enrollment token with the `--enrollment-token` parameter. 
- ```sh + ```sh subs=true bin{{slash}}elasticsearch --enrollment-token ``` {{es}} automatically generates certificates and keys in the following directory: - ```sh + ```sh subs=true config{{slash}}certs ``` diff --git a/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md b/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md index 8eb332503a..b12f7e3336 100644 --- a/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md +++ b/deploy-manage/deploy/self-managed/_snippets/pw-env-var.md @@ -2,7 +2,7 @@ The password for the `elastic` user and the enrollment token for {{kib}} are out We recommend storing the `elastic` password as an environment variable in your shell. For example: -```sh +```sh subs=true {{export}}ELASTIC_PASSWORD="your_password" ``` diff --git a/deploy-manage/deploy/self-managed/_snippets/wolfi.md b/deploy-manage/deploy/self-managed/_snippets/wolfi.md index 5c84753980..af884464ce 100644 --- a/deploy-manage/deploy/self-managed/_snippets/wolfi.md +++ b/deploy-manage/deploy/self-managed/_snippets/wolfi.md @@ -4,6 +4,6 @@ To use the Wolfi image, append `-wolfi` to the image tag in the Docker command. For example: -```sh +```sh subs=true docker pull docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{stack-version}} ``` \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/access-kibana.md b/deploy-manage/deploy/self-managed/access-kibana.md index bfb6045a5d..f53b4afd3b 100644 --- a/deploy-manage/deploy/self-managed/access-kibana.md +++ b/deploy-manage/deploy/self-managed/access-kibana.md @@ -10,10 +10,14 @@ applies_to: Access {{kib}} through the web application on port 5601. -1. Point your web browser to the machine where you are running {{kib}} and specify the port number. For example, `localhost:5601` or `http://YOURDOMAIN.com:5601`. +1. Point your web browser to the machine where you are running {{kib}} and specify the port number. For example, `localhost:5601` or `https://YOURDOMAIN.com:5601`. 
To remotely connect to {{kib}}, set [`server.host`](kibana://reference/configuration-reference/general-settings.md#server-host) to a non-loopback address. + :::{note} + For production deployments, you should always [secure {{kib}} with a certificate](/deploy-manage/security/secure-http-communications.md#encrypt-kibana-http) and access it over HTTPS. + ::: + 2. Log on to your account. 3. Go to the home page, then click **{{kib}}**. 4. To make the {{kib}} page your landing page, click **Make this my landing page**. diff --git a/deploy-manage/deploy/self-managed/air-gapped-install.md b/deploy-manage/deploy/self-managed/air-gapped-install.md index f41118fbb0..5a2887f170 100644 --- a/deploy-manage/deploy/self-managed/air-gapped-install.md +++ b/deploy-manage/deploy/self-managed/air-gapped-install.md @@ -5,13 +5,11 @@ mapped_urls: applies_to: deployment: self: -sub: - stack-version: "9.0.0" --- # Air gapped install -Some components of the {{stack}} require additional configuration and local dependencies in order to deploy in environments without internet access. This guide gives an overview of this setup scenario and helps bridge together existing documentation for individual parts of the stack. +Some components of the {{stack}} require additional configuration and local dependencies in order to deploy in environments without internet access. This guide gives an overview of this setup scenario and connects you to existing documentation for individual parts of the stack. Refer to the section for each Elastic component for air-gapped installation configuration and dependencies in a self-managed Linux environment. @@ -91,7 +89,7 @@ Besides setting up the EPR service, you also need to [configure {{kib}}](/deploy The following script generates a SystemD service file on a RHEL 8 system in order to run EPR with Podman in a production environment. 
-```sh +```sh subs=true #!/usr/bin/env bash EPR_BIND_ADDRESS="0.0.0.0" @@ -119,7 +117,7 @@ podman create \ The following is an example of an actual SystemD service file for an EPR, launched as a Podman service. -```ini +```ini subs=true # container-elastic-epr.service # autogenerated by Podman 4.1.1 # Wed Oct 19 13:12:33 UTC 2022 @@ -174,7 +172,7 @@ When setting up own web server, such as NGINX, to function as the {{artifact-reg The following example script downloads artifacts from the internet to be later served as a private Elastic Package Registry. -```sh +```sh subs=true #!/usr/bin/env bash set -o nounset -o errexit -o pipefail diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md index cf45a330f1..d2c4214e2a 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md @@ -1,6 +1,4 @@ --- -sub: - stack-version: "9.0.0" applies_to: deployment: self: @@ -36,7 +34,7 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes 3. Pull the {{es}} Docker image. 
- ```sh + ```sh subs=true docker pull docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` @@ -44,14 +42,14 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes $$$docker-verify-signature$$$ - ```sh + ```sh subs=true wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` The `cosign` command prints the check results and the signature payload in JSON format: - ```sh + ```sh subs=true Verification for docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -- The following checks were performed on each of these signatures: - The cosign claims were validated @@ -61,7 +59,7 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes 5. Start an {{es}} container. - ```sh + ```sh subs=true docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` @@ -72,7 +70,7 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes {{ml-cap}} features such as [semantic search with ELSER](/solutions/search/semantic-search/semantic-search-elser-ingest-pipelines.md) require a larger container with more than 1GB of memory. If you intend to use the {{ml}} capabilities, then start the container with this command: - ```sh + ```sh subs=true docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` @@ -115,7 +113,7 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes 2. Start a new {{es}} container. Include the enrollment token as an environment variable. 
- ```sh + ```sh subs=true docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` @@ -129,20 +127,20 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes 1. Pull the {{kib}} Docker image. - ```sh + ```sh subs=true docker pull docker.elastic.co/kibana/kibana:{{stack-version}} ``` 2. Optional: Verify the {{kib}} image’s signature. - ```sh + ```sh subs=true wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:{{stack-version}} ``` 3. Start a {{kib}} container. - ```sh + ```sh subs=true docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{{stack-version}} ``` @@ -167,7 +165,7 @@ Use Docker commands to start a single-node {{es}} cluster for development or tes To remove the containers and their network, run: -```sh +```sh subs=true # Remove the Elastic network docker network rm elastic diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md index b819458791..f443916a38 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md @@ -1,6 +1,4 @@ --- -sub: - stack-version: "9.0.0" applies_to: deployment: self: @@ -43,7 +41,7 @@ Use Docker Compose to start a three-node {{es}} cluster with {{kib}}. Docker Com 5. In the `.env` file, set `STACK_VERSION` to the current {{stack}} version. - ```txt + ```txt subs=true ... # Version of Elastic products STACK_VERSION={{stack-version}} @@ -56,7 +54,7 @@ Use Docker Compose to start a three-node {{es}} cluster with {{kib}}. Docker Com ```txt ... - # Port to expose {{es}} HTTP API to the host + # Port to expose Elasticsearch HTTP API to the host #ES_PORT=9200 ES_PORT=127.0.0.1:9200 ... 
diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md index 543f8dc40e..a2f5afb345 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md @@ -65,7 +65,7 @@ To encrypt your secure settings with a password and have them persist outside th For example: -```sh +```sh subs=true docker run -it --rm \ -v full_path_to/config:/usr/share/elasticsearch/config \ docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ @@ -74,7 +74,7 @@ bin/elasticsearch-keystore create -p You can also use a `docker run` command to add or update secure settings in the keystore. You’ll be prompted to enter the setting values. If the keystore is encrypted, you’ll also be prompted to enter the keystore password. -```sh +```sh subs=true docker run -it --rm \ -v full_path_to/config:/usr/share/elasticsearch/config \ docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} \ @@ -95,7 +95,7 @@ If you’ve already created the keystore and don’t need to update it, you can In some environments, it might make more sense to prepare a custom image that contains your configuration. 
A `Dockerfile` to achieve this might be as simple as: -```sh +```sh subs=true FROM docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/ ``` diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md index 58c9d19140..1654a7ef86 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md @@ -133,7 +133,7 @@ Increased ulimits for [nofile](setting-system-settings.md) and [nproc](/deploy-m To check the Docker daemon defaults for ulimits, run: -```sh +```sh subs=true docker run --rm docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' ``` @@ -168,7 +168,7 @@ To manually set the heap size in production, bind mount a [JVM options](elastics For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` environment variable. For example, to use 1GB, use the following command. -```sh +```sh subs=true docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` @@ -179,7 +179,7 @@ The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend Pin your deployments to a specific version of the {{es}} Docker image. 
For example: -```sh +```sh subs=true docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index 99fbf4a34d..71cfc5f0e4 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -6,7 +6,7 @@ sub: slash: "/" export: "export " escape: "\\" - stack-version: "9.0.0" + auto: " -d" navigation_title: "Install on Linux or MacOS" applies_to: deployment: @@ -39,7 +39,7 @@ Download and install the archive for Linux or MacOS. The Linux archive for {{es}} {{stack-version}} can be downloaded and installed as follows: -```sh +```sh subs=true wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-linux-x86_64.tar.gz wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-linux-x86_64.tar.gz.sha512 shasum -a 512 -c elasticsearch-{{stack-version}}-linux-x86_64.tar.gz.sha512 <1> @@ -56,7 +56,7 @@ cd elasticsearch-{{stack-version}}/ <2> The MacOS archive for {{es}} {{stack-version}} can be downloaded and installed as follows: -```sh +```sh subs=true curl -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz curl https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> tar -xzf elasticsearch-{{stack-version}}-darwin-x86_64.tar.gz diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index a82940033e..39088859ac 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ 
b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -99,7 +99,7 @@ Examine `/etc/apt/sources.list.d/elasticsearch-9.x.list` for the duplicate entry The Debian package for {{es}} {{stack-version}} can be downloaded from the website and installed as follows: -```sh +```sh subs=true wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-amd64.deb wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-amd64.deb.sha512 shasum -a 512 -c elasticsearch-{{stack-version}}-amd64.deb.sha512 <1> diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index db3e205ff6..9501dbe186 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -84,7 +84,7 @@ sudo zypper modifyrepo --enable elasticsearch && \ The RPM for {{es}} {{stack-version}} can be downloaded from the website and installed as follows: -```sh +```sh subs=true wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-x86_64.rpm wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{stack-version}}-x86_64.rpm.sha512 shasum -a 512 -c elasticsearch-{{stack-version}}-x86_64.rpm.sha512 <1> diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 0b51e1e9e2..af9caec177 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -6,7 +6,7 @@ sub: slash: "\\" export: "$" escape: "^" - stack-version: "9.0.0" + auto: ".bat" navigation_title: Install on Windows applies_to: deployment: @@ -42,7 +42,7 @@ Download the `.zip` archive for {{es}} 
{{stack-version}} from: [https://artifact Unzip it with your favorite unzip tool. This will create a folder called `elasticsearch-`, which we will refer to as `%ES_HOME%`. In a terminal window, `cd` to the `%ES_HOME%` directory, for instance: -```sh +```sh subs=true cd C:\Program Files\elasticsearch-{{stack-version}} ``` @@ -90,7 +90,7 @@ You can install {{es}} as a service that runs in the background or starts automa 1. Install {{es}} as a service. The name of the service and the value of `ES_JAVA_HOME` will be made available during install: - ```sh + ```sh subs=true C:\Program Files\elasticsearch-{{stack-version}}\bin>elasticsearch-service.bat install Installing service : "elasticsearch-service-x64" Using ES_JAVA_HOME (64-bit): "C:\jvm\jdk1.8" @@ -105,7 +105,7 @@ You can install {{es}} as a service that runs in the background or starts automa 2. Start {{es}} as a service. When {{es}} starts, authentication is enabled by default: - ```sh + ```sh subs=true C:\Program Files\elasticsearch-{{stack-version}}\bin>bin\elasticsearch-service.bat start ``` @@ -115,7 +115,7 @@ You can install {{es}} as a service that runs in the background or starts automa 3. Generate a password for the `elastic` user with the [`elasticsearch-reset-password`](elasticsearch://reference/elasticsearch/command-line-tools/reset-password.md) tool. The password is output to the command line. - ```sh + ```sh subs=true C:\Program Files\elasticsearch-{{stack-version}}\bin>\bin\elasticsearch-reset-password -u elastic ``` @@ -123,7 +123,7 @@ You can install {{es}} as a service that runs in the background or starts automa Run the `elasticsearch-service.bat` script in the `bin\` folder to install, remove, manage, or configure the service and potentially start and stop the service from the command line. 
-```sh +```sh subs=true C:\Program Files\elasticsearch-{{stack-version}}\bin>elasticsearch-service.bat ``` diff --git a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md index e264d709e2..813aa38ebe 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md @@ -31,7 +31,7 @@ macOS is supported for development purposes only and is not covered under the su The Linux archive for {{kib}} {{stack-version}} can be downloaded and installed as follows: -```sh +```sh subs=true curl -O https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-linux-x86_64.tar.gz curl https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-linux-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> tar -xzf kibana-{{stack-version}}-linux-x86_64.tar.gz @@ -45,7 +45,7 @@ cd kibana-{{stack-version}}/ <2> The Darwin archive for {{kib}} {{stack-version}} can be downloaded and installed as follows: -```sh +```sh subs=true curl -O https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz curl https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-darwin-x86_64.tar.gz.sha512 | shasum -a 512 -c - <1> tar -xzf kibana-{{stack-version}}-darwin-x86_64.tar.gz @@ -63,7 +63,7 @@ Apple’s rollout of stricter notarization requirements affected the notarizatio To prevent Gatekeeper checks on the {{kib}} files, run the following command on the downloaded `.tar.gz` archive or the directory to which was extracted: -```sh +```sh subs=true xattr -d -r com.apple.quarantine ``` diff --git a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md index 98e0d93e04..cb002075c1 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md +++ 
b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md @@ -26,7 +26,7 @@ Download the .zip windows archive for {{kib}} {{stack-version}} from [https://ar Unzip it with your favorite unzip tool. This will create a folder called kibana-{{stack-version}}-windows-x86_64, which we will refer to as `$KIBANA_HOME`. In a terminal window, CD to the `$KIBANA_HOME` directory, for instance: -```sh +```sh subs=true CD c:\kibana-{{stack-version}}-windows-x86_64 ``` diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md index 3f00b15fcd..6e67bff505 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md @@ -83,7 +83,7 @@ Examine `/etc/apt/sources.list.d/kibana-9.x.list` for the duplicate entry or loc ### Download and install the Debian package manually [install-deb] The Debian package for {{kib}} {{stack-version}} can be downloaded from the website and installed as follows: -```sh +```sh subs=true wget https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-amd64.deb shasum -a 512 kibana-{{stack-version}}-amd64.deb <1> sudo dpkg -i kibana-{{stack-version}}-amd64.deb diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-docker.md b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md index 17c5375be7..406fb657b1 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-docker.md @@ -53,20 +53,20 @@ This setup doesn’t run multiple {{es}} nodes by default. To create a multi-nod 3. Pull the {{es}} Docker image. - ```sh + ```sh subs=true docker pull docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` 4. Optional: Install [Cosign](https://docs.sigstore.dev/system_config/installation/) for your environment. 
Then use Cosign to verify the {{es}} image’s signature. - ```sh + ```sh subs=true wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` The `cosign` command prints the check results and the signature payload in JSON format: - ```sh + ```sh subs=true Verification for docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} -- The following checks were performed on each of these signatures: - The cosign claims were validated @@ -76,7 +76,7 @@ This setup doesn’t run multiple {{es}} nodes by default. To create a multi-nod 5. Start an {{es}} container. - ```sh + ```sh subs=true docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:{{stack-version}} ``` @@ -96,20 +96,20 @@ This setup doesn’t run multiple {{es}} nodes by default. To create a multi-nod 7. Pull the {{kib}} Docker image. - ```sh + ```sh subs=true docker pull docker.elastic.co/kibana/kibana:{{stack-version}} ``` 8. Optional: Verify the {{kib}} image’s signature. - ```sh + ```sh subs=true wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub docker.elastic.co/kibana/kibana:{{stack-version}} ``` 9. Start a {{kib}} container. - ```sh + ```sh subs=true docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{{stack-version}} ``` @@ -134,7 +134,7 @@ This setup doesn’t run multiple {{es}} nodes by default. To create a multi-nod To remove the containers and their network, run: -```sh +```sh subs=true # Remove the Elastic network docker network rm elastic @@ -155,7 +155,7 @@ The Docker images provide several methods for configuring {{kib}}. The conventio One way to configure {{kib}} on Docker is to provide `kibana.yml` via bind-mounting. 
With `docker-compose`, the bind-mount can be specified like this: -```yaml +```yaml subs=true version: '2' services: kibana: @@ -169,7 +169,7 @@ services: By default, {{kib}} auto-generates a keystore file for secure settings at startup. To persist your [secure settings](/deploy-manage/security/secure-settings.md), use the `kibana-keystore` utility to bind-mount the parent directory of the keystore to the container. For example: -```sh +```sh subs=true docker run -it --rm -v full_path_to/config:/usr/share/kibana/config -v full_path_to/data:/usr/share/kibana/data docker.elastic.co/kibana/kibana:{{stack-version}} bin/kibana-keystore create docker run -it --rm -v full_path_to/config:/usr/share/kibana/config -v full_path_to/data:/usr/share/kibana/data docker.elastic.co/kibana/kibana:{{stack-version}} bin/kibana-keystore add test_keystore_setting ``` @@ -200,7 +200,7 @@ Supplying array options can be tricky. The following example shows the syntax fo These variables can be set with `docker-compose` like this: -```yaml +```yaml subs=true version: '2' services: kibana: diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md index 367324e3e2..08ef616c45 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md @@ -73,7 +73,7 @@ sudo zypper install kibana <3> The RPM for {{kib}} {{stack-version}} can be downloaded from the website and installed as follows: -```sh +```sh subs=true wget https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-x86_64.rpm wget https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-x86_64.rpm.sha512 shasum -a 512 -c kibana-{{stack-version}}-x86_64.rpm.sha512 <1> diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md index e943658443..31655c8423 
100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md @@ -226,8 +226,8 @@ From the {{es}} startup logs: Or by specifying a location to write a PID file to on startup (`-p `): ```sh -$ ./bin/{{es}} -p /tmp/{{es}}-pid -d -$ cat /tmp/{{es}}-pid && echo +$ ./bin/elasticsearch -p /tmp/elasticsearch-pid -d +$ cat /tmp/elasticsearch-pid && echo 15516 $ kill -SIGTERM 15516 ``` diff --git a/docset.yml b/docset.yml index b66e11ea7a..acffb7ca57 100644 --- a/docset.yml +++ b/docset.yml @@ -401,3 +401,7 @@ subs: icon-bug: "pass:[]" icon-checkInCircleFilled: "pass:[]" icon-warningFilled: "pass:[]" + stack-version: "9.0.0" + eck_version: "3.0.0" + apm_server_version: "9.0.0" + version: "9.0.0" \ No newline at end of file diff --git a/get-started/the-stack.md b/get-started/the-stack.md index 2daa664da6..dc82694304 100644 --- a/get-started/the-stack.md +++ b/get-started/the-stack.md @@ -5,8 +5,6 @@ mapped_urls: - https://www.elastic.co/guide/en/kibana/current/index.html - https://www.elastic.co/guide/en/elastic-stack/current/installing-elastic-stack.html - https://www.elastic.co/guide/en/elastic-stack/current/overview.html -sub: - stack-version: "9.0.0" --- # The {{stack}} diff --git a/solutions/observability/apps/apm-server-binary.md b/solutions/observability/apps/apm-server-binary.md index 9bbc1eba10..97823c556e 100644 --- a/solutions/observability/apps/apm-server-binary.md +++ b/solutions/observability/apps/apm-server-binary.md @@ -38,7 +38,7 @@ To download and install APM Server, use the commands below that work with your s $$$apm-deb$$$ **deb:** -```shell +```shell subs=true curl -L -O https://artifacts.elastic.co/downloads/apm-server/apm-server-{{apm_server_version}}-amd64.deb sudo dpkg -i apm-server-{{apm_server_version}}-amd64.deb ``` @@ -46,7 +46,7 @@ sudo dpkg -i apm-server-{{apm_server_version}}-amd64.deb $$$apm-rpm$$$ **RPM:** -```shell +```shell subs=true 
curl -L -O https://artifacts.elastic.co/downloads/apm-server/apm-server-{{apm_server_version}}-x86_64.rpm sudo rpm -vi apm-server-{{apm_server_version}}-x86_64.rpm ``` @@ -54,7 +54,7 @@ sudo rpm -vi apm-server-{{apm_server_version}}-x86_64.rpm $$$apm-linux$$$ **Other Linux:** -```shell +```shell subs=true curl -L -O https://artifacts.elastic.co/downloads/apm-server/apm-server-{{apm_server_version}}-linux-x86_64.tar.gz tar xzvf apm-server-{{apm_server_version}}-linux-x86_64.tar.gz ``` @@ -62,7 +62,7 @@ tar xzvf apm-server-{{apm_server_version}}-linux-x86_64.tar.gz $$$apm-mac$$$ **Mac:** -```shell +```shell subs=true curl -L -O https://artifacts.elastic.co/downloads/apm-server/apm-server-{{apm_server_version}}-darwin-x86_64.tar.gz tar xzvf apm-server-{{apm_server_version}}-darwin-x86_64.tar.gz ``` diff --git a/solutions/observability/logs/ecs-formatted-application-logs.md b/solutions/observability/logs/ecs-formatted-application-logs.md index 091f5b0fff..c25b79d6e4 100644 --- a/solutions/observability/logs/ecs-formatted-application-logs.md +++ b/solutions/observability/logs/ecs-formatted-application-logs.md @@ -75,14 +75,14 @@ Install {{filebeat}} on the server you want to monitor by running the commands t :::::::{tab-set} ::::::{tab-item} DEB -```sh +```sh subs=true curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-darwin-x86_64.tar.gz tar xzvf filebeat-{{version}}-darwin-x86_64.tar.gz ``` :::::: ::::::{tab-item} RPM -```sh +```sh subs=true curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-linux-x86_64.tar.gz tar xzvf filebeat-{{version}}-linux-x86_64.tar.gz ``` @@ -105,14 +105,14 @@ If script execution is disabled on your system, you need to set the execution po :::::: ::::::{tab-item} Linux -```sh +```sh subs=true curl -L -O https\://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-amd64.deb sudo dpkg -i filebeat-{{version}}-amd64.deb ``` :::::: ::::::{tab-item} Windows -```sh 
+```sh subs=true curl -L -O https\://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-x86_64.rpm sudo rpm -vi filebeat-{{version}}-x86_64.rpm ``` diff --git a/solutions/observability/logs/plaintext-application-logs.md b/solutions/observability/logs/plaintext-application-logs.md index 02e9fa8733..0fa33952f2 100644 --- a/solutions/observability/logs/plaintext-application-logs.md +++ b/solutions/observability/logs/plaintext-application-logs.md @@ -43,14 +43,14 @@ Install {{filebeat}} on the server you want to monitor by running the commands t :::::::{tab-set} ::::::{tab-item} DEB -```sh +```sh subs=true curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-darwin-x86_64.tar.gz tar xzvf filebeat-{{version}}-darwin-x86_64.tar.gz ``` :::::: ::::::{tab-item} RPM -```sh +```sh subs=true curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-linux-x86_64.tar.gz tar xzvf filebeat-{{version}}-linux-x86_64.tar.gz ``` @@ -63,7 +63,7 @@ tar xzvf filebeat-{{version}}-linux-x86_64.tar.gz 4. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon and select **Run As Administrator**). 5. 
From the PowerShell prompt, run the following commands to install {{filebeat}} as a Windows service: - ```powershell + ```powershell subs=true PS > cd 'C:\Program Files\{{filebeat}}' PS C:\Program Files\{filebeat}> .\install-service-filebeat.ps1 ``` @@ -73,14 +73,14 @@ If script execution is disabled on your system, you need to set the execution po :::::: ::::::{tab-item} Linux -```sh +```sh subs=true curl -L -O https\://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-amd64.deb sudo dpkg -i filebeat-{{version}}-amd64.deb ``` :::::: ::::::{tab-item} Windows -```sh +```sh subs=true curl -L -O https\://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{{version}}-x86_64.rpm sudo rpm -vi filebeat-{{version}}-x86_64.rpm ``` From 406a737bfc2a8ccdf3c1706b6b72cdf2ca9497d3 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Mar 2025 15:39:33 -0400 Subject: [PATCH 39/43] tidy --- .../deploy/self-managed/_snippets/enable-auto-indices.md | 8 +++++--- deploy-manage/deploy/self-managed/configure-kibana.md | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md b/deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md index 9e21485203..c249af5120 100644 --- a/deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md +++ b/deploy-manage/deploy/self-managed/_snippets/enable-auto-indices.md @@ -1,9 +1,11 @@ -Some features automatically create indices within {{es}}. By default, {{es}} is configured to allow automatic index creation, and no additional steps are required. However, if you have disabled automatic index creation in {{es}}, you must configure [`action.auto_create_index`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) in `elasticsearch.yml` to allow features to create the following indices: +:::{tip} +This section is only required if you have previously changed `action.auto_create_index` from its default value. 
+::: + +Some features automatically create indices within {{es}}. By default, {{es}} is configured to allow automatic index creation, and no additional steps are required. However, if you have disabled automatic index creation in {{es}}, you must configure [`action.auto_create_index`](elasticsearch://reference/elasticsearch/configuration-reference/index-management-settings.md#auto-create-index) in `elasticsearch.yml` to allow features to create the following indices: ```yaml action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml* ``` -::::{important} If you are using [Logstash](https://www.elastic.co/products/logstash) or [Beats](https://www.elastic.co/products/beats) then you will most likely require additional index names in your `action.auto_create_index` setting, and the exact value will depend on your local configuration. If you are unsure of the correct value for your environment, you may consider setting the value to `*` which will allow automatic creation of all indices. 
-:::: \ No newline at end of file diff --git a/deploy-manage/deploy/self-managed/configure-kibana.md b/deploy-manage/deploy/self-managed/configure-kibana.md index 9d2cba6aa0..77e06e9421 100644 --- a/deploy-manage/deploy/self-managed/configure-kibana.md +++ b/deploy-manage/deploy/self-managed/configure-kibana.md @@ -42,4 +42,6 @@ Refer to the following documentation to learn how to perform key configuration t * Use [Connectors](/deploy-manage/manage-connectors.md) to manage connection information between {{es}}, {{kib}}, and third-party systems * Present a [user access agreement](/deploy-manage/users-roles/cluster-or-deployment-auth/access-agreement.md) when logging on to {{kib}} * Review [considerations for using {{kib}} in production](/deploy-manage/production-guidance/kibana-in-production-environments.md), including using load balancers -* [Monitor events inside and outside of {{kib}}](/deploy-manage/monitor.md) \ No newline at end of file +* [Monitor events inside and outside of {{kib}}](/deploy-manage/monitor.md) +* [Configure logging](/deploy-manage/monitor/logging-configuration.md) +* [Secure](/deploy-manage/security.md) {{kib}} communications and resources \ No newline at end of file From 8ad26312bb1a97d9c3ab322991da9e5264660d06 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Mar 2025 15:43:43 -0400 Subject: [PATCH 40/43] additional connection --- deploy-manage/deploy/self-managed/air-gapped-install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-manage/deploy/self-managed/air-gapped-install.md b/deploy-manage/deploy/self-managed/air-gapped-install.md index 5a2887f170..4fa3a47729 100644 --- a/deploy-manage/deploy/self-managed/air-gapped-install.md +++ b/deploy-manage/deploy/self-managed/air-gapped-install.md @@ -66,7 +66,7 @@ To learn more about installing {{fleet-server}}, refer to the [{{fleet-server}} Air-gapped setup of the APM server is possible in two ways: -* By setting up one of the {{agent}} deployments with an 
APM integration, as described in [Switch a self-installation to the APM integration](/solutions/observability/apps/switch-self-installation-to-apm-integration.md). +* By setting up one of the {{agent}} deployments with an APM integration, as described in [Switch a self-installation to the APM integration](/solutions/observability/apps/switch-self-installation-to-apm-integration.md). See [air gapped installation guidance for {{agent}}](#air-gapped-elastic-agent). * Or, by installing a standalone Elastic APM Server, as described in the [APM configuration documentation](/solutions/observability/apps/configure-apm-server.md). From a2c3c24271649abe2a9c5e9ef7d9d9747869034b Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Mar 2025 16:05:33 -0400 Subject: [PATCH 41/43] thanks colleen --- .../self-managed/install-elasticsearch-docker-configure.md | 2 -- .../deploy/self-managed/install-elasticsearch-docker-prod.md | 2 -- .../self-managed/install-elasticsearch-with-debian-package.md | 1 - .../deploy/self-managed/install-elasticsearch-with-docker.md | 2 -- .../deploy/self-managed/install-elasticsearch-with-rpm.md | 1 - .../self-managed/install-kibana-from-archive-on-linux-macos.md | 2 -- deploy-manage/deploy/self-managed/install-kibana-on-windows.md | 2 -- .../deploy/self-managed/install-kibana-with-debian-package.md | 3 +-- deploy-manage/deploy/self-managed/install-kibana-with-rpm.md | 2 -- deploy-manage/deploy/self-managed/install-kibana.md | 2 -- deploy-manage/deploy/self-managed/installing-elasticsearch.md | 2 -- 11 files changed, 1 insertion(+), 20 deletions(-) diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md index a2f5afb345..fe50572889 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-configure.md @@ -1,6 +1,4 @@ --- -sub: - stack-version: 
"9.0.0" applies_to: deployment: self: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md index 1654a7ef86..4fb62452c3 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod.md @@ -1,6 +1,4 @@ --- -sub: - stack-version: "9.0.0" applies_to: deployment: self: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index 39088859ac..21437a4013 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -7,7 +7,6 @@ sub: distro: "Debian" export: "export " escape: "\\" - stack-version: "9.0.0" navigation_title: Install with Debian package applies_to: deployment: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index bf6a26ac91..343bc91272 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -2,8 +2,6 @@ mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html navigation_title: "Install with Docker" -sub: - stack-version: "9.0.0" applies_to: deployment: self: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 9501dbe186..8c906e46d2 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -7,7 +7,6 @@ sub: distro: "RPM" export: "export" escape: "\\" - stack-version: "9.0.0" 
navigation_title: "Install with RPM package" applies_to: deployment: diff --git a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md index 813aa38ebe..1583078527 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-kibana-from-archive-on-linux-macos.md @@ -2,8 +2,6 @@ navigation_title: "Install from archive on Linux or macOS" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/targz.html -sub: - stack-version: "9.0.0" navigation_title: "Linux and MacOS" applies_to: deployment: diff --git a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md index cb002075c1..ca24686f5b 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-kibana-on-windows.md @@ -2,8 +2,6 @@ navigation_title: "Install on Windows" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/windows.html -sub: - stack-version: "9.0.0" navigation_title: "Windows" applies_to: deployment: diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md index 6e67bff505..7b7cf2c15d 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-debian-package.md @@ -2,8 +2,6 @@ navigation_title: "Install with Debian package" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/deb.html -sub: - stack-version: "9.0.0" navigation_title: "Debian" applies_to: deployment: @@ -83,6 +81,7 @@ Examine `/etc/apt/sources.list.d/kibana-9.x.list` for the duplicate entry or loc ### Download and install the Debian package manually [install-deb] The Debian package for {{kib}} 
{{stack-version}} can be downloaded from the website and installed as follows: + ```sh subs=true wget https://artifacts.elastic.co/downloads/kibana/kibana-{{stack-version}}-amd64.deb shasum -a 512 kibana-{{stack-version}}-amd64.deb <1> diff --git a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md index 08ef616c45..f9a530ee2e 100644 --- a/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-kibana-with-rpm.md @@ -2,8 +2,6 @@ navigation_title: "Install with RPM" mapped_pages: - https://www.elastic.co/guide/en/kibana/current/rpm.html -sub: - stack-version: "9.0.0" navigation_title: "RPM" applies_to: deployment: diff --git a/deploy-manage/deploy/self-managed/install-kibana.md b/deploy-manage/deploy/self-managed/install-kibana.md index 7d3dc5edab..5641add591 100644 --- a/deploy-manage/deploy/self-managed/install-kibana.md +++ b/deploy-manage/deploy/self-managed/install-kibana.md @@ -5,8 +5,6 @@ mapped_urls: applies_to: deployment: self: -sub: - stack-version: "9.0.0" --- # Install {{kib}} diff --git a/deploy-manage/deploy/self-managed/installing-elasticsearch.md b/deploy-manage/deploy/self-managed/installing-elasticsearch.md index d80508a607..6fa5ec4dfa 100644 --- a/deploy-manage/deploy/self-managed/installing-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/installing-elasticsearch.md @@ -4,8 +4,6 @@ mapped_urls: - https://www.elastic.co/guide/en/elastic-stack/current/installing-elastic-stack.html - https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html - https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-stack-security.html -sub: - stack-version: "9.0.0" applies_to: deployment: self: From 3ccfdf31f341f7c617018e2846443151dc9ab1ea Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Mar 2025 16:20:14 -0400 Subject: [PATCH 42/43] better start-stop page" --- 
.../start-stop-elasticsearch.md | 163 +----------------- 1 file changed, 7 insertions(+), 156 deletions(-) diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md index 31655c8423..e8205096d5 100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md @@ -40,170 +40,21 @@ If you installed {{es}} on Windows with a `.zip` package, you can start {{es}} f If you're starting {{es}} for the first time, then {{es}} also enables and configures security. [Learn more](/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md#security-at-startup). -### Debian packages [start-deb] +### Debian or RPM packages (using `systemd`) [start-deb] -#### Running {{es}} with `systemd` [start-es-deb-systemd] - -To configure {{es}} to start automatically when the system boots up, run the following commands: - -```sh -sudo /bin/systemctl daemon-reload -sudo /bin/systemctl enable elasticsearch.service -``` - -{{es}} can be started and stopped as follows: - -```sh -sudo systemctl start elasticsearch.service -sudo systemctl stop elasticsearch.service -``` - -These commands provide no feedback as to whether {{es}} was started successfully or not. Instead, this information will be written in the log files located in `/var/log/{{es}}/`. - -If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once {{es}} is up and running. 
- -```sh -echo "keystore_password" > /path/to/my_pwd_file.tmp -chmod 600 /path/to/my_pwd_file.tmp -sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.tmp -sudo systemctl start elasticsearch.service -``` - -By default the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. - -When `systemd` logging is enabled, the logging information are available using the `journalctl` commands: - -To tail the journal: - -```sh -sudo journalctl -f -``` - -To list journal entries for the {{es}} service: - -```sh -sudo journalctl --unit elasticsearch -``` - -To list journal entries for the {{es}} service starting from a given time: - -```sh -sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" -``` - -Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.html) for more command line options. - -::::{admonition} Startup timeouts with older systemd versions -:class: tip - -By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. - -Versions of `systemd` prior to 238 do not support the timeout extension mechanism and will terminate the {{es}} process if it has not fully started up within the configured timeout. If this happens, {{es}} will report in its logs that it was shut down normally a short time after it started: - -```text -[2022-01-31T01:22:31,077][INFO ][o.e.n.Node ] [instance-0000000123] starting ... -... -[2022-01-31T01:37:15,077][INFO ][o.e.n.Node ] [instance-0000000123] stopping ... 
-``` - -However the `systemd` logs will report that the startup timed out: - -```text -Jan 31 01:22:30 debian systemd[1]: Starting elasticsearch... -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. -Jan 31 01:37:15 debian systemd[1]: Failed to start elasticsearch. -``` +:::{include} /deploy-manage/deploy/self-managed/_snippets/systemd-startup.md +::: -To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. +:::{include} /deploy-manage/deploy/self-managed/_snippets/systemd.md +::: -:::: +:::{include} /deploy-manage/deploy/self-managed/_snippets/systemd-startup-timeout.md +::: ### Docker images [start-docker] If you installed a Docker image, you can start {{es}} from the command line. There are different methods depending on whether you’re using development mode or production mode. See [](../../../deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md). -### RPM packages [start-rpm] - -#### Running {{es}} with `systemd` [start-es-rpm-systemd] - -To configure {{es}} to start automatically when the system boots up, run the following commands: - -```sh -sudo /bin/systemctl daemon-reload -sudo /bin/systemctl enable elasticsearch.service -``` - -{{es}} can be started and stopped as follows: - -```sh -sudo systemctl start elasticsearch.service -sudo systemctl stop elasticsearch.service -``` - -These commands provide no feedback as to whether {{es}} was started successfully or not. Instead, this information will be written in the log files located in `/var/log/{{es}}/`. 
- -If you have password-protected your {{es}} keystore, you will need to provide `systemd` with the keystore password using a local file and systemd environment variables. This local file should be protected while it exists and may be safely deleted once {{es}} is up and running. - -```sh -echo "keystore_password" > /path/to/my_pwd_file.tmp -chmod 600 /path/to/my_pwd_file.tmp -sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.tmp -sudo systemctl start elasticsearch.service -``` - -By default the {{es}} service doesn’t log information in the `systemd` journal. To enable `journalctl` logging, the `--quiet` option must be removed from the `ExecStart` command line in the `elasticsearch.service` file. - -When `systemd` logging is enabled, the logging information are available using the `journalctl` commands: - -To tail the journal: - -```sh -sudo journalctl -f -``` - -To list journal entries for the {{es}} service: - -```sh -sudo journalctl --unit elasticsearch -``` - -To list journal entries for the {{es}} service starting from a given time: - -```sh -sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16" -``` - -Check `man journalctl` or [https://www.freedesktop.org/software/systemd/man/journalctl.html](https://www.freedesktop.org/software/systemd/man/journalctl.html) for more command line options. - -::::{admonition} Startup timeouts with older systemd versions -:class: tip - -By default {{es}} sets the `TimeoutStartSec` parameter to `systemd` to `900s`. If you are running at least version 238 of `systemd` then {{es}} can automatically extend the startup timeout, and will do so repeatedly until startup is complete even if it takes longer than 900s. - -Versions of `systemd` prior to 238 do not support the timeout extension mechanism and will terminate the {{es}} process if it has not fully started up within the configured timeout. 
If this happens, {{es}} will report in its logs that it was shut down normally a short time after it started: - -```text -[2022-01-31T01:22:31,077][INFO ][o.e.n.Node ] [instance-0000000123] starting ... -... -[2022-01-31T01:37:15,077][INFO ][o.e.n.Node ] [instance-0000000123] stopping ... -``` - -However the `systemd` logs will report that the startup timed out: - -```text -Jan 31 01:22:30 debian systemd[1]: Starting elasticsearch... -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Start operation timed out. Terminating. -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Main process exited, code=killed, status=15/TERM -Jan 31 01:37:15 debian systemd[1]: elasticsearch.service: Failed with result 'timeout'. -Jan 31 01:37:15 debian systemd[1]: Failed to start elasticsearch. -``` - -To avoid this, upgrade your `systemd` to at least version 238. You can also temporarily work around the problem by extending the `TimeoutStartSec` parameter. - -:::: - ## Stopping {{es}} [stopping-elasticsearch] An orderly shutdown of {{es}} ensures that {{es}} has a chance to cleanup and close outstanding resources. For example, a node that is shutdown in an orderly fashion will remove itself from the cluster, sync translogs to disk, and perform other related cleanup activities. You can help ensure an orderly shutdown by properly stopping {{es}}. 
From 9ab25414892924be655ae3920b5afaabae6c0ddf Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Mar 2025 16:23:51 -0400 Subject: [PATCH 43/43] fix anchor --- .../stack-docs/elastic-stack/installing-stack-demo-self.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md index 349c4d0612..d1a12e4012 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md +++ b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md @@ -91,7 +91,7 @@ To begin, use RPM to install {{es}} on the first host. This initial {{es}} insta * Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. 7. Copy the terminal output from the install command to a local file. In particular, you’ll need the password for the built-in `elastic` superuser account. The output also contains the commands to enable {{es}} to run as a service, which you’ll use in the next step. -8. Run the following two commands to enable {{es}} to run as a service using `systemd`. This enables {{es}} to start automatically when the host system reboots. You can find details about this and the following steps in [Running {{es}} with `systemd`](../../../deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md#start-es-deb-systemd). +8. Run the following two commands to enable {{es}} to run as a service using `systemd`. This enables {{es}} to start automatically when the host system reboots. You can find details about this and the following steps in [Running {{es}} with `systemd`](../../../deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md#start-deb). ```sh sudo systemctl daemon-reload