From 9c0574b77994e97245c7046951e2c5b5ee9b1c83 Mon Sep 17 00:00:00 2001
From: Colleen McGinnis
Date: Wed, 19 Feb 2025 09:28:48 -0600
Subject: [PATCH 1/6] use cross-repo links, update recently available local links
---
 973 files changed, 3218 insertions(+), 3216 deletions(-)

diff --git a/deploy-manage/api-keys/elastic-cloud-enterprise-api-keys.md b/deploy-manage/api-keys/elastic-cloud-enterprise-api-keys.md
index 8817a48f39..c58e3a977a 100644
--- a/deploy-manage/api-keys/elastic-cloud-enterprise-api-keys.md
+++ b/deploy-manage/api-keys/elastic-cloud-enterprise-api-keys.md
@@ -70,5 +70,5 @@ To create a bearer token:
     { "token": "eyJ0eXa......MgBmsw4s" }
     ```

-2. Specify the bearer token in the Authentication header of your API requests. To learn more, check [accessing the API from the command line](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-api-command-line.html).
+2. Specify the bearer token in the Authentication header of your API requests. To learn more, check [accessing the API from the command line](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-enterprise/ece-api-command-line.md).

diff --git a/deploy-manage/autoscaling/deployments-autoscaling-on-eck.md b/deploy-manage/autoscaling/deployments-autoscaling-on-eck.md
index 3e85baceb8..421d109b7a 100644
--- a/deploy-manage/autoscaling/deployments-autoscaling-on-eck.md
+++ b/deploy-manage/autoscaling/deployments-autoscaling-on-eck.md
@@ -10,7 +10,7 @@ Elasticsearch autoscaling requires a valid Enterprise license or Enterprise tria
 ::::

-ECK can leverage the [autoscaling API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-autoscaling) introduced in Elasticsearch 7.11 to adjust automatically the number of Pods and the allocated resources in a tier. Currently, autoscaling is supported for Elasticsearch [data tiers](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html) and machine learning nodes.
+ECK can leverage the [autoscaling API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-autoscaling) introduced in Elasticsearch 7.11 to adjust automatically the number of Pods and the allocated resources in a tier. Currently, autoscaling is supported for Elasticsearch [data tiers](/manage-data/lifecycle/data-tiers.md) and machine learning nodes.

 ## Enable autoscaling [k8s-enable]

@@ -159,7 +159,7 @@ spec:
         max: 512Gi
 ```

-You can find [a complete example in the ECK GitHub repository](https://github.com/elastic/cloud-on-k8s/blob/2.16/config/recipes/autoscaling/elasticsearch.yaml) which will also show you how to fine-tune the [autoscaling deciders](https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-deciders.html).
+You can find [a complete example in the ECK GitHub repository](https://github.com/elastic/cloud-on-k8s/blob/2.16/config/recipes/autoscaling/elasticsearch.yaml) which will also show you how to fine-tune the [autoscaling deciders](/deploy-manage/autoscaling/autoscaling-deciders.md).
 ### Change the polling interval [k8s-autoscaling-polling-interval]

diff --git a/deploy-manage/autoscaling/ec-autoscaling.md b/deploy-manage/autoscaling/ec-autoscaling.md
index c7e94def41..999e7bd6b5 100644
--- a/deploy-manage/autoscaling/ec-autoscaling.md
+++ b/deploy-manage/autoscaling/ec-autoscaling.md
@@ -62,7 +62,7 @@ When past behavior on a hot tier indicates that the influx of data can increase
 * Through ILM policies. For example, if a deployment has only hot nodes and autoscaling is enabled, it automatically creates warm or cold nodes, if an ILM policy is trying to move data from hot to warm or cold nodes.

-On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html).
+On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md).

 On a highly available deployment, autoscaling events are always applied to instances in each availability zone simultaneously, to ensure consistency.

diff --git a/deploy-manage/autoscaling/ece-autoscaling-api-example.md b/deploy-manage/autoscaling/ece-autoscaling-api-example.md
index 539cedcc87..dfdeb0cde4 100644
--- a/deploy-manage/autoscaling/ece-autoscaling-api-example.md
+++ b/deploy-manage/autoscaling/ece-autoscaling-api-example.md
@@ -7,7 +7,7 @@ mapped_pages:

 This example demonstrates how to use the Elastic Cloud Enterprise RESTful API to create a deployment with autoscaling enabled.

-The example deployment has a hot data and content tier, warm data tier, cold data tier, and a machine learning node, all of which will scale within the defined parameters. To learn about the autoscaling settings, check [Deployment autoscaling](../autoscaling.md) and [Autoscaling example](ece-autoscaling-example.md). For more information about using the Elastic Cloud Enterprise API in general, check [RESTful API](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-restful-api.html).
+The example deployment has a hot data and content tier, warm data tier, cold data tier, and a machine learning node, all of which will scale within the defined parameters. To learn about the autoscaling settings, check [Deployment autoscaling](../autoscaling.md) and [Autoscaling example](ece-autoscaling-example.md). For more information about using the Elastic Cloud Enterprise API in general, check [RESTful API](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-enterprise/restful-api.md).

 ## Requirements [ece_requirements_3]

diff --git a/deploy-manage/autoscaling/ece-autoscaling.md b/deploy-manage/autoscaling/ece-autoscaling.md
index a398492397..682e76e054 100644
--- a/deploy-manage/autoscaling/ece-autoscaling.md
+++ b/deploy-manage/autoscaling/ece-autoscaling.md
@@ -62,7 +62,7 @@ When past behavior on a hot tier indicates that the influx of data can increase
 * Through ILM policies. For example, if a deployment has only hot nodes and autoscaling is enabled, it automatically creates warm or cold nodes, if an ILM policy is trying to move data from hot to warm or cold nodes.

-On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html).
+On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md).

 On a highly available deployment, autoscaling events are always applied to instances in each availability zone simultaneously, to ensure consistency.
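The `down_scale_delay` and queueing-limit behavior described in the autoscaling hunks above is driven by the machine learning decider of the Elasticsearch autoscaling policy API. A minimal sketch of tuning it, assuming a cluster endpoint in `$ES_URL`, an API key in `$API_KEY`, and an illustrative policy name `ml_policy` (the setting values are placeholders, not recommendations):

```sh
# Sketch: configure the machine learning autoscaling decider.
# num_anomaly_jobs_in_queue is the queueing limit that can trigger a scale up;
# down_scale_delay is how long reduced usage must persist before a scale down.
curl -X PUT "$ES_URL/_autoscaling/policy/ml_policy" \
  -H "Authorization: ApiKey $API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "roles": ["ml"],
    "deciders": {
      "ml": {
        "num_anomaly_jobs_in_queue": 5,
        "down_scale_delay": "1h"
      }
    }
  }'
```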
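Likewise, the bearer-token step patched in `elastic-cloud-enterprise-api-keys.md` above comes down to sending the token in the Authorization header. A minimal sketch, assuming an ECE coordinator host in `$COORDINATOR_HOST` and reusing the truncated token from the example response (not a real token):

```sh
# Sketch: authenticate an ECE API request with a bearer token.
# $COORDINATOR_HOST is a placeholder; 12443 is the default ECE administration
# API port, and -k skips verification of the self-signed certificate.
curl -k "https://$COORDINATOR_HOST:12443/api/v1/deployments" \
  -H "Authorization: Bearer eyJ0eXa......MgBmsw4s"
```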
diff --git a/deploy-manage/autoscaling/ech-autoscaling.md b/deploy-manage/autoscaling/ech-autoscaling.md index 126aeaaa1f..eeef25ab3c 100644 --- a/deploy-manage/autoscaling/ech-autoscaling.md +++ b/deploy-manage/autoscaling/ech-autoscaling.md @@ -62,7 +62,7 @@ When past behavior on a hot tier indicates that the influx of data can increase * Through ILM policies. For example, if a deployment has only hot nodes and autoscaling is enabled, it automatically creates warm or cold nodes, if an ILM policy is trying to move data from hot to warm or cold nodes. -On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html). +On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md). On a highly available deployment, autoscaling events are always applied to instances in each availability zone simultaneously, to ensure consistency. diff --git a/deploy-manage/cloud-organization/billing/billing-faq.md b/deploy-manage/cloud-organization/billing/billing-faq.md index f0ee15c8ae..c4f4a06a6c 100644 --- a/deploy-manage/cloud-organization/billing/billing-faq.md +++ b/deploy-manage/cloud-organization/billing/billing-faq.md @@ -108,7 +108,7 @@ $$$faq-dts$$$What are the data transfer and storage charges and how can I contro $$$faq-taxes$$$What taxes will be applied on my invoice? : Customers within the United States, and US territories, will be billed from Elasticsearch Inc., based out of the United States. The US Sales Tax rate will be based on the SaaS tax rates in the local jurisdiction (state/county/city) of the billing address of your subscription. - Customers outside the United States, will be billed from Elasticsearch BV, based out of the Netherlands. 
Customers with a billing address in countries with applicable EU VAT will have VAT applied based on their country and status as a business or private customer. Elastic collects VAT Numbers associated with EU VAT to determine your status as a business (B2B) or private / non-business customer (B2C), as this is a key factor to determine Elastic’s liability to charge VAT on your subscription. To update your VAT Number follow the instructions provided in [Add your billing details](https://www.elastic.co/guide/en/cloud/current/ec-billing-details.html). Customers located in countries without EU VAT will not be applied VAT on their invoices. + Customers outside the United States will be billed from Elasticsearch BV, based out of the Netherlands. Customers with a billing address in countries with applicable EU VAT will have VAT applied based on their country and status as a business or private customer. Elastic collects VAT Numbers associated with EU VAT to determine your status as a business (B2B) or private / non-business customer (B2C), as this is a key factor to determine Elastic’s liability to charge VAT on your subscription. To update your VAT Number, follow the instructions provided in [Add your billing details](/deploy-manage/cloud-organization/billing/add-billing-details.md). Customers located in countries without EU VAT will not have VAT applied to their invoices. diff --git a/deploy-manage/cloud-organization/billing/cloud-hosted-deployment-billing-dimensions.md b/deploy-manage/cloud-organization/billing/cloud-hosted-deployment-billing-dimensions.md index 385bfbdf20..84aec88cf6 100644 --- a/deploy-manage/cloud-organization/billing/cloud-hosted-deployment-billing-dimensions.md +++ b/deploy-manage/cloud-organization/billing/cloud-hosted-deployment-billing-dimensions.md @@ -57,7 +57,7 @@ Data inter-node charges are currently waived for Azure deployments. Data transfer out of deployments and between nodes of the cluster is hard to control, as it is a function of the use case employed for the cluster and cannot always be tuned. Use cases such as batch queries executed at a frequent interval may be revisited to help lower transfer costs, if applicable. Watcher email alerts also count towards data transfer out of the deployment, so you may want to reduce their frequency and size. -The largest contributor to inter-node data transfer is usually shard movement between nodes in a cluster. The only way to prevent shard movement is by having a single node in a single availability zone. This solution is only possible for clusters up to 64GB RAM and is not recommended as it creates a risk of data loss. [Oversharding](https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html) can cause excessive shard movement. Avoiding oversharding can also help control costs and improve performance. Note that creating snapshots generates inter-node data transfer. The *storage* cost of snapshots is detailed later in this document. +The largest contributor to inter-node data transfer is usually shard movement between nodes in a cluster. The only way to prevent shard movement is by having a single node in a single availability zone. This solution is only possible for clusters up to 64GB RAM and is not recommended as it creates a risk of data loss. [Oversharding](/deploy-manage/production-guidance/optimize-performance/size-shards.md) can cause excessive shard movement. Avoiding oversharding can also help control costs and improve performance.
Note that creating snapshots generates inter-node data transfer. The *storage* cost of snapshots is detailed later in this document. The exact root cause of unusual data transfer is not always something we can identify as it can have many causes, some of which are out of our control and not associated with Cloud configuration changes. It may help to [enable monitoring](../../monitor/stack-monitoring/elastic-cloud-stack-monitoring.md) and examine index and shard activity on your cluster. diff --git a/deploy-manage/cloud-organization/tools-and-apis.md b/deploy-manage/cloud-organization/tools-and-apis.md index 0cb89d8639..f45c4cc399 100644 --- a/deploy-manage/cloud-organization/tools-and-apis.md +++ b/deploy-manage/cloud-organization/tools-and-apis.md @@ -10,7 +10,7 @@ Most Elastic resources can be accessed and managed through RESTful APIs. While t Elasticsearch Service API : You can use the Elasticsearch Service API to manage your deployments and all of the resources associated with them. This includes performing deployment CRUD operations, scaling or autoscaling resources, and managing traffic filters, deployment extensions, remote clusters, and Elastic Stack versions. You can also access cost data by deployment and by organization. - To learn more about the Elasticsearch Service API, read through the [API overview](https://www.elastic.co/guide/en/cloud/current/ec-restful-api.html), try out some [getting started examples](https://www.elastic.co/guide/en/cloud/current/ec-api-examples.html), and check our [API reference documentation](https://www.elastic.co/docs/api/doc/cloud). + To learn more about the Elasticsearch Service API, read through the [API overview](https://www.elastic.co/guide/en/cloud/current/ec-restful-api.html), try out some [getting started examples](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/api-examples.md), and check our [API reference documentation](https://www.elastic.co/docs/api/doc/cloud). Calls to the Elasticsearch Service API are subject to [Rate limiting](https://www.elastic.co/guide/en/cloud/current/ec-api-rate-limiting.html). @@ -18,7 +18,7 @@ Elasticsearch Service API Elasticsearch APIs : This set of APIs allows you to interact directly with the Elasticsearch nodes in your deployment. You can ingest data, run search queries, check the health of your clusters, manage snapshots, and more. - To use these APIs in Elasticsearch Service read our topic [Access the Elasticsearch API console](https://www.elastic.co/guide/en/cloud/current/ec-api-console.html), and to learn about all of the available endpoints check the [Elasticsearch API reference documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-apis.html). + To use these APIs in Elasticsearch Service read our topic [Access the Elasticsearch API console](https://www.elastic.co/guide/en/cloud/current/ec-api-console.html), and to learn about all of the available endpoints check the [Elasticsearch API reference documentation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/index.md). Some [restrictions](../deploy/elastic-cloud/restrictions-known-problems.md#ec-restrictions-apis-elasticsearch) apply when using the Elasticsearch APIs in Elasticsearch Service. 
@@ -37,7 +37,7 @@ Other Products * [APM event intake API Reference](/solutions/observability/apps/elastic-apm-events-intake-api.md) * [App Search API Reference](https://www.elastic.co/guide/en/app-search/current/api-reference.html) * [Elastic Security APIs](https://www.elastic.co/guide/en/security/current/security-apis.html) - * [Fleet APIs](https://www.elastic.co/guide/en/fleet/current/fleet-api-docs.html) + * [Fleet APIs](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/fleet-api-docs.md) * [Logstash APIs](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html) * [Workplace Search API Reference](https://www.elastic.co/guide/en/workplace-search/current/workplace-search-api-overview.html) diff --git a/deploy-manage/deploy/cloud-enterprise/assign-roles-to-hosts.md b/deploy-manage/deploy/cloud-enterprise/assign-roles-to-hosts.md index 654c2f0805..8bfc87d42b 100644 --- a/deploy-manage/deploy/cloud-enterprise/assign-roles-to-hosts.md +++ b/deploy-manage/deploy/cloud-enterprise/assign-roles-to-hosts.md @@ -30,7 +30,7 @@ Once the `director` role is assigned to a runner, the Zookeeper service starts o Each role is associated with a set of Docker containers that provide the specific functionality. -There are some additional roles shown in the Cloud UI, such as the [beats-runner](https://www.elastic.co/guide/en/elastic-stack-glossary/current/terms.html#glossary-beats-runner) and [services-forwarder](https://www.elastic.co/guide/en/elastic-stack-glossary/current/terms.html#glossary-services-forwarder) roles, that are required by Elastic Cloud Enterprise and that you cannot modify. +There are some additional roles shown in the Cloud UI, such as the [beats-runner](asciidocalypse://docs/docs-content/docs/reference/glossary/index.md#glossary-beats-runner) and [services-forwarder](https://www.elastic.co/guide/en/elastic-stack-glossary/current/terms.html#glossary-services-forwarder) roles, which are required by Elastic Cloud Enterprise and which you cannot modify. To assign roles to hosts: diff --git a/deploy-manage/deploy/cloud-enterprise/ce-add-support-for-node-roles-autoscaling.md b/deploy-manage/deploy/cloud-enterprise/ce-add-support-for-node-roles-autoscaling.md index 1bf5f4e294..e6b3adca81 100644 --- a/deploy-manage/deploy/cloud-enterprise/ce-add-support-for-node-roles-autoscaling.md +++ b/deploy-manage/deploy/cloud-enterprise/ce-add-support-for-node-roles-autoscaling.md @@ -12,12 +12,12 @@ System owned deployment templates have already been updated to support both data ## Adding support for node_roles [ece_adding_support_for_node_roles] -The `node_roles` field defines the roles that an Elasticsearch topology element can have, which is used in place of `node_type` when a new feature such as autoscaling is enabled, or when a new data tier is added. This field is supported on [Elastic stack versions 7.10 and above](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-node-types.html). +The `node_roles` field defines the roles that an Elasticsearch topology element can have, which is used in place of `node_type` when a new feature such as autoscaling is enabled, or when a new data tier is added. This field is supported on [Elastic Stack versions 7.10 and above](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-enterprise/changes-to-index-allocation-api.md). There are a number of fields that need to be added to each Elasticsearch node in order to support `node_roles`: * **id**: Unique identifier of the topology element.
This field, along with the `node_roles`, identifies an Elasticsearch topology element. -* **node_roles**: The list of node roles. Allowable roles are: `master`, `ingest`, `ml`, `data_hot`, `data_content`, `data_warm`, `data_cold`, `data_frozen`, `remote_cluster_client`, and `transform`. For details, check [Node roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles). +* **node_roles**: The list of node roles. Allowable roles are: `master`, `ingest`, `ml`, `data_hot`, `data_content`, `data_warm`, `data_cold`, `data_frozen`, `remote_cluster_client`, and `transform`. For details, check [Node roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles). * **topology_element_control**: Controls for the topology element. * **min**: The absolute minimum size limit for a topology element. If the value is `0`, that means the topology element can be disabled. diff --git a/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md b/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md index 8054b7e383..6f7dc9b832 100644 --- a/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md +++ b/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md @@ -48,7 +48,7 @@ If you have an App Search instance, after specifying a new deployment domain name ::::{note} -The built-in Proxy Certificate only validates against the default endpoint format described on this page. Once you change it, it is necessary to upload a new Proxy Certificate as described in [Manage security certificates](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-manage-certificates.html). For test only, clients can be configured with hostname verification disabled until the new certificate is uploaded. +The built-in Proxy Certificate only validates against the default endpoint format described on this page. Once you change it, it is necessary to upload a new Proxy Certificate as described in [Manage security certificates](/deploy-manage/security/secure-your-elastic-cloud-enterprise-installation/manage-security-certificates.md). For testing purposes only, clients can be configured with hostname verification disabled until the new certificate is uploaded. :::: diff --git a/deploy-manage/deploy/cloud-enterprise/configure-host-rhel-cloud.md b/deploy-manage/deploy/cloud-enterprise/configure-host-rhel-cloud.md index 481e4aca90..bef34bece0 100644 --- a/deploy-manage/deploy/cloud-enterprise/configure-host-rhel-cloud.md +++ b/deploy-manage/deploy/cloud-enterprise/configure-host-rhel-cloud.md @@ -20,7 +20,7 @@ Create a RHEL 8 (the version must be >= 8.5, but <9), RHEL 9, Rocky Linux 8, or * For RHEL 8, follow your internal guidelines to add a vanilla RHEL 8 VM to your environment. Note that the version must be >= 8.5, but <9. -Verify that required traffic is allowed. Check the [Networking prerequisites](ece-networking-prereq.md) and [Google Cloud Platform (GCP)](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-prereqs.html) guidelines for a list of ports that need to be open. The technical configuration highly depends on the underlying infrastructure. +Verify that required traffic is allowed. Check the [Networking prerequisites](ece-networking-prereq.md) and [Google Cloud Platform (GCP)](/deploy-manage/deploy/cloud-enterprise/prepare-environment.md) guidelines for a list of ports that need to be open. The technical configuration highly depends on the underlying infrastructure.
**Example:** For AWS, allowing traffic between hosts is implemented using security groups. @@ -283,7 +283,7 @@ Verify that required traffic is allowed. Check the [Networking prerequisites](ec # enable forwarding so the Docker networking works as expected net.ipv4.ip_forward=1 # Decrease the maximum number of TCP retransmissions to 5 as recommended for Elasticsearch TCP retransmission timeout. - # See https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config-tcpretries.html + # See /deploy-manage/deploy/self-managed/system-config-tcpretries.md net.ipv4.tcp_retries2=5 # Make sure the host doesn't swap too early vm.swappiness=1 diff --git a/deploy-manage/deploy/cloud-enterprise/deployment-templates.md b/deploy-manage/deploy/cloud-enterprise/deployment-templates.md index 3e93f71a3e..917f5309f1 100644 --- a/deploy-manage/deploy/cloud-enterprise/deployment-templates.md +++ b/deploy-manage/deploy/cloud-enterprise/deployment-templates.md @@ -32,7 +32,7 @@ The deployment templates available are: * **Cross-cluster search template** - This template manages remote connections for running Elasticsearch queries across multiple deployments and indices. These federated searches make it possible to break up large deployments into smaller, more resilient Elasticsearch clusters. You can organize deployments by departments or projects for example, but still have the ability to aggregate query results and get visibility into your Elastic Cloud Enterprise infrastructure. You can add remote connections either when you create your deployment or when you customize it. To know more about cross-cluster search, check [Enable cross-cluster search](https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html). + This template manages remote connections for running Elasticsearch queries across multiple deployments and indices. These federated searches make it possible to break up large deployments into smaller, more resilient Elasticsearch clusters. You can organize deployments by departments or projects, for example, but still have the ability to aggregate query results and get visibility into your Elastic Cloud Enterprise infrastructure. You can add remote connections either when you create your deployment or when you customize it. To learn more about cross-cluster search, check [Enable cross-cluster search](/deploy-manage/remote-clusters/ec-enable-ccs.md). * **Elastic Security template** diff --git a/deploy-manage/deploy/cloud-enterprise/ece-configure-templates-index-management.md b/deploy-manage/deploy/cloud-enterprise/ece-configure-templates-index-management.md index 74563c918a..bf00eb5bf9 100644 --- a/deploy-manage/deploy/cloud-enterprise/ece-configure-templates-index-management.md +++ b/deploy-manage/deploy/cloud-enterprise/ece-configure-templates-index-management.md @@ -39,7 +39,7 @@ To configure index management when you create a deployment template: Index curation : Creates new indices on hot nodes first and moves them to warm nodes later on, based on the data views (formerly *index patterns*) you specify. Also manages replica counts for you, so that all shards of an index can fit on the right data nodes. Compared to index lifecycle management, index curation for time-based indices supports only one action, to move indices from nodes on one data configuration to another, but it is more straightforward to set up initially and all setup can be done directly from the Cloud UI.
- If your user need to delete indices once they are no longer useful to them, they can run [Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html) on-premise to manage indices for Elasticsearch clusters hosted on Elastic Cloud Enterprise. + If your users need to delete indices once they are no longer useful, they can run [Curator](asciidocalypse://docs/curator/docs/reference/elasticsearch/elasticsearch-client-curator/index.md) on-premises to manage indices for Elasticsearch clusters hosted on Elastic Cloud Enterprise. To configure index curation: diff --git a/deploy-manage/deploy/cloud-enterprise/ece-ha.md b/deploy-manage/deploy/cloud-enterprise/ece-ha.md index f6766b75ac..72a5fcb045 100644 --- a/deploy-manage/deploy/cloud-enterprise/ece-ha.md +++ b/deploy-manage/deploy/cloud-enterprise/ece-ha.md @@ -19,7 +19,7 @@ The main difference between Elastic Cloud Enterprise installations that include We recommend that for each deployment you use at least two availability zones for production and three for mission-critical systems. Using more than three availability zones for a deployment is neither required nor supported. Availability zones are intended for high availability, not scalability. ::::{warning} -{{es}} clusters that are set up to use only one availability zone are not [highly available](https://www.elastic.co/guide/en/elasticsearch/reference/current/high-availability-cluster-design.html) and are at risk of data loss. To safeguard against data loss, you must use at least two {{ece}} availability zones. +{{es}} clusters that are set up to use only one availability zone are not [highly available](/deploy-manage/production-guidance/availability-and-resilience.md) and are at risk of data loss. To safeguard against data loss, you must use at least two {{ece}} availability zones. :::: @@ -31,7 +31,7 @@ Increasing the number of zones should not be used to add more resources. The con ## Master nodes [ece-ece-ha-2-master-nodes] -$$$ece-ha-tiebreaker$$$Tiebreakers are used in distributed clusters to avoid cases of [split brain](https://en.wikipedia.org/wiki/Split-brain_(computing)), where an {{es}} cluster splits into multiple, autonomous parts that continue to handle requests independently of each other, at the risk of affecting cluster consistency and data loss. A split-brain scenario is avoided by making sure that a minimum number of [master-eligible nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#master-node) must be present in order for any part of the cluster to elect a master node and accept user requests. To prevent multiple parts of a cluster from being eligible, there must be a [quorum-based majority](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-quorums.html) of `(n/2)+1` nodes, where `n` is the number of master-eligible nodes in the cluster. The minimum number of master nodes to reach quorum in a two-node cluster is the same as for a three-node cluster: two nodes must be available. +$$$ece-ha-tiebreaker$$$Tiebreakers are used in distributed clusters to avoid cases of [split brain](https://en.wikipedia.org/wiki/Split-brain_(computing)), where an {{es}} cluster splits into multiple, autonomous parts that continue to handle requests independently of each other, at the risk of affecting cluster consistency and data loss.
A split-brain scenario is avoided by making sure that a minimum number of [master-eligible nodes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#master-node) must be present in order for any part of the cluster to elect a master node and accept user requests. To prevent multiple parts of a cluster from being eligible, there must be a [quorum-based majority](/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md) of `(n/2)+1` nodes, where `n` is the number of master-eligible nodes in the cluster. The minimum number of master nodes to reach quorum in a two-node cluster is the same as for a three-node cluster: two nodes must be available. If you create a cluster with nodes in two availability zones when a third zone is available, Elastic Cloud Enterprise can create a tiebreaker in the third availability zone to help establish quorum in case of loss of an availability zone. The extra tiebreaker node that helps to provide quorum does not have to be a full-fledged and expensive node, as it does not hold data. For example: By tagging allocator hosts in Elastic Cloud Enterprise, you can create a cluster with eight nodes each in zones `ece-1a` and `ece-1b`, for a total of 16 nodes, and one tiebreaker node in zone `ece-1c`. This cluster can lose any of the three availability zones whilst maintaining quorum, which means that the cluster can continue to process user requests, provided that there is sufficient capacity available when an availability zone goes down. @@ -52,14 +52,14 @@ GET _all/_settings/index.number_of_replicas ``` ::::{warning} -Indices with no replica, except for [searchable snapshot indices](https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots.html), are not highly available. You should use replicas to mitigate against possible data loss. +Indices with no replica, except for [searchable snapshot indices](/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md), are not highly available. You should use replicas to mitigate against possible data loss. :::: ## Snapshot backups [ece-ece-ha-4-snapshot] -You should configure and use [{{es}} snapshots](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html). Snapshots provide a way to backup and restore your {{es}} indices. They can be used to copy indices for testing, to recover from failures or accidental deletions, or to migrate data to other deployments. We recommend configuring an [{{ece}}-level repository](../../tools/snapshot-and-restore/cloud-enterprise.md) to apply across all deployments. See [Work with snapshots](../../tools/snapshot-and-restore.md) for more guidance. +You should configure and use [{{es}} snapshots](/deploy-manage/tools/snapshot-and-restore.md). Snapshots provide a way to back up and restore your {{es}} indices. They can be used to copy indices for testing, to recover from failures or accidental deletions, or to migrate data to other deployments. We recommend configuring an [{{ece}}-level repository](../../tools/snapshot-and-restore/cloud-enterprise.md) to apply across all deployments. See [Work with snapshots](../../tools/snapshot-and-restore.md) for more guidance.
## Further considerations [ece-ece-ha-5-other] diff --git a/deploy-manage/deploy/cloud-enterprise/ece-manage-capacity.md b/deploy-manage/deploy/cloud-enterprise/ece-manage-capacity.md index 5daf15cd36..ef5c4662a6 100644 --- a/deploy-manage/deploy/cloud-enterprise/ece-manage-capacity.md +++ b/deploy-manage/deploy/cloud-enterprise/ece-manage-capacity.md @@ -36,7 +36,7 @@ curl -X PUT \ -d '{"capacity":}' ``` -For more information on how to use API keys for authentication, check the section [Access the API from the Command Line](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-api-command-line.html). +For more information on how to use API keys for authentication, check the section [Access the API from the Command Line](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-enterprise/ece-api-command-line.md). ::::{important} Prior to ECE 3.5.0, regardless of the use of this API, the [CPU quota](#ece-alloc-cpu) used the memory specified at installation time. @@ -88,7 +88,7 @@ Those percentages represent the upper limit of the % of the total CPU resources In addition to the [CPU quotas](#ece-alloc-cpu), the `processors` setting also plays a relevant role. -The allocated `processors` setting originates from Elasticsearch and is responsible for calculating your [thread pools](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-threadpool.html#node.processors). While the CPU quota defines the percentage of the total CPU resources of an allocator that are assigned to an instance, the allocated `processors` define how the thread pools are calculated in Elasticsearch, and therefore how many concurrent search and indexing requests an instance can process. In other words, the CPU ratio defines how fast a single task can be completed, while the `processors` setting defines how many different tasks can be completed at the same time. +The allocated `processors` setting originates from Elasticsearch and is responsible for calculating your [thread pools](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/thread-pool-settings.md#node.processors). While the CPU quota defines the percentage of the total CPU resources of an allocator that are assigned to an instance, the allocated `processors` define how the thread pools are calculated in Elasticsearch, and therefore how many concurrent search and indexing requests an instance can process. In other words, the CPU ratio defines how fast a single task can be completed, while the `processors` setting defines how many different tasks can be completed at the same time. We rely on Elasticsearch and the `-XX:ActiveProcessorCount` JVM setting to automatically detect the allocated `processors`. diff --git a/deploy-manage/deploy/cloud-enterprise/find-cloud-id.md b/deploy-manage/deploy/cloud-enterprise/find-cloud-id.md index 9fbae64b58..08e8d264f1 100644 --- a/deploy-manage/deploy/cloud-enterprise/find-cloud-id.md +++ b/deploy-manage/deploy/cloud-enterprise/find-cloud-id.md @@ -39,7 +39,7 @@ To use the Cloud ID, you need: * The unique Cloud ID for your deployment, available from the deployment overview page. * A user ID and password that has permission to send data to your cluster. - In our examples, we use the `elastic` superuser that every Elasticsearch cluster comes with. The password for the `elastic` user is provided when you create a deployment (and can also be [reset](../../users-roles/cluster-or-deployment-auth/built-in-users.md) if you forget it).
On a production system, you should adapt these examples by creating a user that can write to and access only the minimally required indices. For each Beat, review the specific feature and role table, similar to the one in [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/feature-roles.html) documentation. + In our examples, we use the `elastic` superuser that every Elasticsearch cluster comes with. The password for the `elastic` user is provided when you create a deployment (and can also be [reset](../../users-roles/cluster-or-deployment-auth/built-in-users.md) if you forget it). On a production system, you should adapt these examples by creating a user that can write to and access only the minimally required indices. For each Beat, review the specific feature and role table, similar to the one in [Metricbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/feature-roles.md) documentation. @@ -48,7 +48,7 @@ To use the Cloud ID, you need: The following example shows how you can send operational data from Metricbeat to Elastic Cloud Enterprise by using the Cloud ID. Any of the available Beats will work, but we had to pick one for this example. ::::{tip} -For others, you can learn more about [getting started](https://www.elastic.co/guide/en/beats/libbeat/current/getting-started.html) with each Beat. +For others, you can learn more about [getting started](asciidocalypse://docs/beats/docs/reference/ingestion-tools/index.md) with each Beat. :::: @@ -57,8 +57,8 @@ To get started with Metricbeat and Elastic Cloud Enterprise: 1. [Log into the Cloud UI](log-into-cloud-ui.md). 2. [Create a new deployment](create-deployment.md) and copy down the password for the `elastic` user. 3. On the deployment overview page, copy down the Cloud ID. -4. Set up the Beat of your choice, such as [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-installation-configuration.html). -5. [Configure the Beat output to send to Elastic Cloud](https://www.elastic.co/guide/en/beats/metricbeat/current/configure-cloud-id.html). +4. Set up the Beat of your choice, such as [Metricbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-installation-configuration.md). +5. [Configure the Beat output to send to Elastic Cloud](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configure-cloud-id.md). ::::{note} Make sure you replace the values for `cloud.id` and `cloud.auth` with your own information. diff --git a/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md b/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md index 5efe1a8a73..8867cc8f58 100644 --- a/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md +++ b/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md @@ -42,7 +42,7 @@ Otherwise, when the file content changes, the corresponding user is mentioned as 1. Make sure you are running a healthy x-node ECE environment ready to be upgraded. All nodes use the Docker container runtime. 2. Upgrade to ECE 3.3.0+ following the [Upgrade your installation](../../upgrade/orchestrator/upgrade-cloud-enterprise.md) guideline. Skip this step if your existing ECE installation already runs ECE >= 3.3.0. 3. Follow your internal guidelines to add an additional vanilla RHEL (Note that the version must be >= 8.5, but <9), or Rocky Linux 8 or 9 VM to your environment. -4. 
Verify that required traffic from the host added in step 3 is allowed to the primary ECE VM(s). Check the [Networking prerequisites](ece-networking-prereq.md) and [Google Cloud Platform (GCP)](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-prereqs.html) guidelines for a list of ports that need to be open. The technical configuration highly depends on the underlying infrastructure. +4. Verify that required traffic from the host added in step 3 is allowed to the primary ECE VM(s). Check the [Networking prerequisites](ece-networking-prereq.md) and [Google Cloud Platform (GCP)](/deploy-manage/deploy/cloud-enterprise/prepare-environment.md) guidelines for a list of ports that need to be open. The technical configuration highly depends on the underlying infrastructure. **Example** For AWS, allowing traffic between hosts is implemented using security groups. @@ -322,7 +322,7 @@ Otherwise, when the file content changes, the corresponding user is mentioned as # enable forwarding so the Docker networking works as expected net.ipv4.ip_forward=1 # Decrease the maximum number of TCP retransmissions to 5 as recommended for Elasticsearch TCP retransmission timeout. - # See https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config-tcpretries.html + # See /deploy-manage/deploy/self-managed/system-config-tcpretries.md net.ipv4.tcp_retries2=5 # Make sure the host doesn't swap too early vm.swappiness=1 diff --git a/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-logstash.md b/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-logstash.md index 2d0183b1c0..037676cabf 100644 --- a/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-logstash.md +++ b/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-logstash.md @@ -36,7 +36,7 @@ spec: You can specify sensitive settings with Kubernetes secrets. ECK automatically injects these settings into the keystore before it starts Logstash. The ECK operator continues to watch the secrets for changes and will restart Logstash Pods when it detects a change. -The Logstash Keystore can be password protected by setting an environment variable called `LOGSTASH_KEYSTORE_PASS`. Check out [Logstash Keystore](https://www.elastic.co/guide/en/logstash/current/keystore.html#keystore-password) documentation for details. +The Logstash Keystore can be password protected by setting an environment variable called `LOGSTASH_KEYSTORE_PASS`. Check out [Logstash Keystore](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/keystore.md#keystore-password) documentation for details. ```yaml apiVersion: v1 diff --git a/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-maps-server.md b/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-maps-server.md index f789eaee80..b17813f2f9 100644 --- a/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-maps-server.md +++ b/deploy-manage/deploy/cloud-on-k8s/advanced-configuration-maps-server.md @@ -20,7 +20,7 @@ If you already looked at the [Elasticsearch on ECK](elasticsearch-configuration. ## Elastic Maps Server configuration [k8s-maps-configuration] -You can add any valid Elastic Maps Server setting as documented on the [product](https://www.elastic.co/guide/en/kibana/current/maps-connect-to-ems.html#elastic-maps-server-configuration) page to the `spec.config` section. 
+You can add any valid Elastic Maps Server setting as documented on the [product](/explore-analyze/visualize/maps/maps-connect-to-ems.md#elastic-maps-server-configuration) page to the `spec.config` section. The following example demonstrates how to set the log level to `debug`: diff --git a/deploy-manage/deploy/cloud-on-k8s/advanced-configuration.md b/deploy-manage/deploy/cloud-on-k8s/advanced-configuration.md index 3390f9e231..302fcb396e 100644 --- a/deploy-manage/deploy/cloud-on-k8s/advanced-configuration.md +++ b/deploy-manage/deploy/cloud-on-k8s/advanced-configuration.md @@ -16,7 +16,7 @@ This section covers the following topics: ## Use APM Agent central configuration [k8s-apm-agent-central-configuration] -[APM Agent configuration management](https://www.elastic.co/guide/en/observability/current/apm-agent-configuration.html) [7.5.1] allows you to configure your APM Agents centrally from the Kibana APM app. To use this feature, the APM Server needs to be configured with connection details of the Kibana instance. If Kibana is managed by ECK, you can simply add a `kibanaRef` attribute to the APM Server specification: +[APM Agent configuration management](/solutions/observability/apps/apm-agent-central-configuration.md) [7.5.1] allows you to configure your APM Agents centrally from the Kibana APM app. To use this feature, the APM Server needs to be configured with connection details of the Kibana instance. If Kibana is managed by ECK, you can simply add a `kibanaRef` attribute to the APM Server specification: ```yaml cat < 9200/TCP 34m ``` -In order to make requests to the [{{es}} API](https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-apis.html): +In order to make requests to the [{{es}} API](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/index.md): 1. Get the credentials. diff --git a/deploy-manage/deploy/cloud-on-k8s/fleet-managed-elastic-agent.md b/deploy-manage/deploy/cloud-on-k8s/fleet-managed-elastic-agent.md index af36fc1712..b3fb75721c 100644 --- a/deploy-manage/deploy/cloud-on-k8s/fleet-managed-elastic-agent.md +++ b/deploy-manage/deploy/cloud-on-k8s/fleet-managed-elastic-agent.md @@ -7,7 +7,7 @@ mapped_pages: # Fleet-managed Elastic Agent [k8s-elastic-agent-fleet] -This section describes how to configure and deploy {{agent}} in [{{fleet}}-managed](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html) mode with ECK. Check the [Standalone section](standalone-elastic-agent.md) if you want to run {{agent}} in the [standalone mode](https://www.elastic.co/guide/en/fleet/current/install-standalone-elastic-agent.html). +This section describes how to configure and deploy {{agent}} in [{{fleet}}-managed](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md) mode with ECK. Check the [Standalone section](standalone-elastic-agent.md) if you want to run {{agent}} in the [standalone mode](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-standalone-elastic-agent.md). 
* [Quickstart](quickstart-fleet.md) * [Configuration](configuration-fleet.md) diff --git a/deploy-manage/deploy/cloud-on-k8s/http-configuration.md b/deploy-manage/deploy/cloud-on-k8s/http-configuration.md index 8ab1de6f85..6974660fe8 100644 --- a/deploy-manage/deploy/cloud-on-k8s/http-configuration.md +++ b/deploy-manage/deploy/cloud-on-k8s/http-configuration.md @@ -30,7 +30,7 @@ You can disable the generation of the self-signed certificate and hence disable ### Ingress and Kibana configuration [k8s-maps-ingress] -To use Elastic Maps Server from your Kibana instances, you need to configure Kibana to fetch maps from your Elastic Maps Server instance by using the [`map.emsUrl`](https://www.elastic.co/guide/en/kibana/current/maps-connect-to-ems.html#elastic-maps-server-kibana) configuration key. The value of this setting needs to be the URL where the Elastic Maps Server instance is reachable from your browser. The certificates presented by Elastic Maps Server need to be trusted by the browser, and the URL must have the same origin as the URL where your Kibana is hosted to avoid cross origin resource issues. Check the [recipe section](https://github.com/elastic/cloud-on-k8s/tree/2.16/config/recipes/) for an example on how to set this up using an Ingress resource. +To use Elastic Maps Server from your Kibana instances, you need to configure Kibana to fetch maps from your Elastic Maps Server instance by using the [`map.emsUrl`](/explore-analyze/visualize/maps/maps-connect-to-ems.md#elastic-maps-server-kibana) configuration key. The value of this setting needs to be the URL where the Elastic Maps Server instance is reachable from your browser. The certificates presented by Elastic Maps Server need to be trusted by the browser, and the URL must have the same origin as the URL where your Kibana is hosted to avoid cross origin resource issues. Check the [recipe section](https://github.com/elastic/cloud-on-k8s/tree/2.16/config/recipes/) for an example on how to set this up using an Ingress resource. diff --git a/deploy-manage/deploy/cloud-on-k8s/k8s-kibana-advanced-configuration.md b/deploy-manage/deploy/cloud-on-k8s/k8s-kibana-advanced-configuration.md index b4ae4b565b..78a8d9c2b5 100644 --- a/deploy-manage/deploy/cloud-on-k8s/k8s-kibana-advanced-configuration.md +++ b/deploy-manage/deploy/cloud-on-k8s/k8s-kibana-advanced-configuration.md @@ -80,9 +80,9 @@ spec: To deploy more than one instance of {{kib}}, the instances must share a matching set of encryption keys. The following keys are automatically generated by the operator: -* [`xpack.security.encryptionKey`](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#xpack-security-encryptionKey) -* [`xpack.reporting.encryptionKey`](https://www.elastic.co/guide/en/kibana/current/reporting-settings-kb.html#encryption-keys) -* [`xpack.encryptedSavedObjects.encryptionKey`](https://www.elastic.co/guide/en/kibana/current/xpack-security-secure-saved-objects.html) +* [`xpack.security.encryptionKey`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md#xpack-security-encryptionKey) +* [`xpack.reporting.encryptionKey`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/reporting-settings.md#encryption-keys) +* [`xpack.encryptedSavedObjects.encryptionKey`](/deploy-manage/security/secure-saved-objects.md) ::::{tip} If you need to access these encryption keys, you can find them using the `kubectl get secrets` command. 
@@ -99,7 +99,7 @@ kubectl get secret my-kibana-kb-config -o jsonpath='{ .data.kibana\.yml }' | bas You can provide your own encryption keys using a secure setting, as described in [Secure settings](k8s-kibana-secure-settings.md). ::::{note} -While most reconfigurations of your {{kib}} instances are carried out in rolling upgrade fashion, all version upgrades will cause {{kib}} downtime. This happens because you can only run a single version of {{kib}} at any given time. For more information, check [Upgrade {{kib}}](https://www.elastic.co/guide/en/kibana/current/upgrade.html). +While most reconfigurations of your {{kib}} instances are carried out in rolling upgrade fashion, all version upgrades will cause {{kib}} downtime. This happens because you can only run a single version of {{kib}} at any given time. For more information, check [Upgrade {{kib}}](/deploy-manage/upgrade/deployment-or-cluster.md). :::: diff --git a/deploy-manage/deploy/cloud-on-k8s/logstash-plugins.md b/deploy-manage/deploy/cloud-on-k8s/logstash-plugins.md index 33b0b2005e..71e2dcd3c3 100644 --- a/deploy-manage/deploy/cloud-on-k8s/logstash-plugins.md +++ b/deploy-manage/deploy/cloud-on-k8s/logstash-plugins.md @@ -7,7 +7,7 @@ mapped_pages: # Logstash plugins [k8s-logstash-plugins] -The power of {{ls}} is in the plugins--{{logstash-ref}}/input-plugins.html[inputs], [outputs](https://www.elastic.co/guide/en/logstash/current/output-plugins.html), [filters,](https://www.elastic.co/guide/en/logstash/current/filter-plugins.html) and [codecs](https://www.elastic.co/guide/en/logstash/current/codec-plugins.html). +The power of {{ls}} is in the plugins--[inputs](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/input-plugins.md), [outputs](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/output-plugins.md), [filters](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/filter-plugins.md), and [codecs](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/codec-plugins.md). In {{ls}} on ECK, you can use the same plugins that you use for other {{ls}} instances—including Elastic-supported, community-supported, and custom plugins. However, you may have other factors to consider, such as how you configure your {{k8s}} resources, how you specify additional resources, and how you scale your {{ls}} installation. @@ -89,7 +89,7 @@ spec: **Static read-only files** -Some plugins require or allow access to small static read-only files. You can use these for a variety of reasons. Examples include adding custom `grok` patterns for [`logstash-filter-grok`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html) to use for lookup, source code for [`logstash-filter-ruby`], a dictionary for [`logstash-filter-translate`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-translate.html) or the location of a SQL statement for [`logstash-input-jdbc`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html). Make these files available to the {{ls}} resource in your manifest. +Some plugins require or allow access to small static read-only files. You can use these for a variety of reasons.
Examples include adding custom `grok` patterns for [`logstash-filter-grok`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-grok.md) to use for lookup, source code for [`logstash-filter-ruby`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-ruby.md), a dictionary for [`logstash-filter-translate`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-translate.md), or the location of a SQL statement for [`logstash-input-jdbc`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-jdbc.md). Make these files available to the {{ls}} resource in your manifest. ::::{tip} In the plugin documentation, these plugin settings are typically identified by `path` or an `array` of `paths`. :::: To use these in your manifest, create a ConfigMap or Secret representing the asset, a Volume in your `podTemplate.spec` containing the ConfigMap or Secret, and mount that Volume with a VolumeMount in your `podTemplateSpec.container` section of your {{ls}} resource. -This example illustrates configuring a ConfigMap from a ruby source file, and including it in a [`logstash-filter-ruby`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-ruby.html) plugin. +This example illustrates configuring a ConfigMap from a ruby source file, and including it in a [`logstash-filter-ruby`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-ruby.md) plugin. First, create the ConfigMap. @@ -238,7 +238,7 @@ After you build and deploy the custom image, include it in the {{ls}} manifest. ### Writable storage [k8s-logstash-working-with-plugins-writable] -Some {{ls}} plugins need access to writable storage. This could be for checkpointing to keep track of events already processed, a place to temporarily write events before sending a batch of events, or just to actually write events to disk in the case of [`logstash-output-file`](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-file.html). +Some {{ls}} plugins need access to writable storage. This could be for checkpointing to keep track of events already processed, a place to temporarily write events before sending a batch of events, or just to actually write events to disk in the case of [`logstash-output-file`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-file.md). {{ls}} on ECK by default supplies a small 1.5 GiB (gibibyte) persistent volume to each pod. This volume is called `logstash-data` and is located at `/usr/logstash/data`, and is typically the default location for most plugin use cases. This volume is stable across restarts of {{ls}} pods and is suitable for many use cases. @@ -332,7 +332,7 @@ spec: ::::{admonition} Horizontal scaling for {{ls}} plugins * Not all {{ls}} deployments can be scaled horizontally by increasing the number of {{ls}} Pods defined in the {{ls}} resource. Depending on the types of plugins in a {{ls}} installation, increasing the number of pods may cause data duplication, data loss, incorrect data, or may waste resources with pods unable to be utilized correctly. -* The ability of a {{ls}} installation to scale horizontally is bound by its most restrictive plugin(s).
Even if all pipelines are using [`logstash-input-elastic_agent`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) or [`logstash-input-beats`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html) which should enable full horizontal scaling, introducing a more restrictive input or filter plugin forces the restrictions for pod scaling associated with that plugin. +* The ability of a {{ls}} installation to scale horizontally is bound by its most restrictive plugin(s). Even if all pipelines are using [`logstash-input-elastic_agent`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-elastic_agent.md) or [`logstash-input-beats`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-beats.md), which should enable full horizontal scaling, introducing a more restrictive input or filter plugin forces the restrictions for pod scaling associated with that plugin. :::: @@ -344,12 +344,12 @@ spec: * They **must** specify `pipeline.workers=1` for any pipelines that use them. * The number of pods cannot be scaled above 1. -Examples of aggregating filters include [`logstash-filter-aggregate`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-aggregate.html), [`logstash-filter-csv`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-csv.html) when `autodetect_column_names` set to `true`, and any [`logstash-filter-ruby`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-ruby.html) implementations that perform aggregations. +Examples of aggregating filters include [`logstash-filter-aggregate`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-aggregate.md), [`logstash-filter-csv`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-csv.md) when `autodetect_column_names` is set to `true`, and any [`logstash-filter-ruby`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-ruby.html) implementations that perform aggregations. ### Input plugins: events pushed to {{ls}} [k8s-logstash-inputs-data-pushed] -{{ls}} installations with inputs that enable {{ls}} to receive data should be able to scale freely and have load spread across them horizontally. These plugins include [`logstash-input-beats`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html), [`logstash-input-elastic_agent`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html), [`logstash-input-tcp`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-tcp.html), and [`logstash-input-http`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html). +{{ls}} installations with inputs that enable {{ls}} to receive data should be able to scale freely and have load spread across them horizontally. These plugins include [`logstash-input-beats`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html), [`logstash-input-elastic_agent`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html), [`logstash-input-tcp`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-tcp.md), and [`logstash-input-http`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-http.md).
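As a sketch of this freely scalable case (the resource name, version, and counts are illustrative, not taken from this page), a push-based pipeline on ECK can scale by raising `count`, because each connection is routed to whichever Pod the Kubernetes Service selects:

```yaml
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-sample
spec:
  version: 8.16.1
  count: 3   # safe to increase: the beats input holds no cross-pod state
  pipelines:
    - pipeline.id: main
      config.string: |
        input { beats { port => 5044 } }
        output { stdout { codec => dots } }
  services:
    - name: beats
      service:
        spec:
          ports:
            - port: 5044
              name: "filebeat"
              protocol: TCP
```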
### Input plugins: {{ls}} maintains state [k8s-logstash-inputs-local-checkpoints] @@ -360,16 +360,16 @@ Note that plugins that retrieve data from external sources, and require some lev Input plugins that include configuration settings such as `sincedb`, `checkpoint` or `sql_last_run_metadata` may fall into this category. -Examples of these plugins include [`logstash-input-jdbc`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html) (which has no automatic way to split queries across {{ls}} instances), [`logstash-input-s3`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-s3.html) (which has no way to split which buckets to read across {{ls}} instances), or [`logstash-input-file`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-file.html). +Examples of these plugins include [`logstash-input-jdbc`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html) (which has no automatic way to split queries across {{ls}} instances), [`logstash-input-s3`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-s3.md) (which has no way to split which buckets to read across {{ls}} instances), or [`logstash-input-file`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-file.md). ### Input plugins: external source stores state [k8s-logstash-inputs-external-state] {{ls}} installations that use input plugins that retrieve data from an external source, and **rely on the external source to store state** can scale based on the parameters of the external source. -For example, a {{ls}} installation that uses a [`logstash-input-kafka`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html) plugin to retrieve data can scale the number of pods up to the number of partitions used, as a partition can have at most one consumer belonging to the same consumer group. Any pods created beyond that threshold cannot be scheduled to receive data. +For example, a {{ls}} installation that uses a [`logstash-input-kafka`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-kafka.md) plugin to retrieve data can scale the number of pods up to the number of partitions used, as a partition can have at most one consumer belonging to the same consumer group. Any pods created beyond that threshold cannot be scheduled to receive data. -Examples of these plugins include [`logstash-input-kafka`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html), [`logstash-input-azure_event_hubs`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-azure_event_hubs.html), and [`logstash-input-kinesis`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kinesis.html). +Examples of these plugins include [`logstash-input-kafka`](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html), [`logstash-input-azure_event_hubs`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-azure_event_hubs.md), and [`logstash-input-kinesis`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-kinesis.md). 
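For instance, in a hedged sketch of the Kafka case (the broker address, topic, and consumer group are hypothetical), the fragment below can run on at most as many Pods as the `events` topic has partitions; Pods beyond that join the consumer group but receive no data:

```yaml
# Fragment of a Logstash resource spec; not a complete manifest.
spec:
  count: 4   # must not exceed the partition count of the "events" topic
  pipelines:
    - pipeline.id: kafka
      config.string: |
        input {
          kafka {
            bootstrap_servers => "kafka.example.com:9092"  # hypothetical broker
            topics => ["events"]
            group_id => "logstash"   # one consumer group shared by all Pods
          }
        }
        output { stdout { codec => dots } }
```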
@@ -389,12 +389,12 @@ Use these guidelines *in addition* to the general guidelines provided in [Scalin ### {{ls}} integration plugin [k8s-logstash-plugin-considerations-ls-integration] -When your pipeline uses the [`Logstash integration`](https://www.elastic.co/guide/en/logstash/current/plugins-integrations-logstash.html) plugin, add `keepalive=>false` to the [logstash-output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-logstash.html) definition to ensure that load balancing works correctly rather than keeping affinity to the same pod. +When your pipeline uses the [`Logstash integration`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-integrations-logstash.md) plugin, add `keepalive=>false` to the [logstash-output](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-logstash.md) definition to ensure that load balancing works correctly rather than keeping affinity to the same pod. ### Elasticsearch output plugin [k8s-logstash-plugin-considerations-es-output] -The [`elasticsearch output`](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) plugin requires certain roles to be configured in order to enable {{ls}} to communicate with {{es}}. +The [`elasticsearch output`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) plugin requires certain roles to be configured in order to enable {{ls}} to communicate with {{es}}. You can customize roles in {{es}}. Check out [creating custom roles](../../users-roles/cluster-or-deployment-auth/native.md) @@ -418,7 +418,7 @@ stringData: ### Elastic_integration filter plugin [k8s-logstash-plugin-considerations-integration-filter] -The [`elastic_integration filter`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-elastic_integration.html) plugin allows the use of [`ElasticsearchRef`](configuration-logstash.md#k8s-logstash-esref) and environment variables. +The [`elastic_integration filter`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-elastic_integration.md) plugin allows the use of [`ElasticsearchRef`](configuration-logstash.md#k8s-logstash-esref) and environment variables. ```json elastic_integration { @@ -447,7 +447,7 @@ stringData: ### Elastic Agent input and Beats input plugins [k8s-logstash-plugin-considerations-agent-beats] -When you use the [Elastic Agent input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) or the [Beats input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html), set the [`ttl`](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html#_ttl) value on the Agent or Beat to ensure that load is distributed appropriately. +When you use the [Elastic Agent input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) or the [Beats input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html), set the [`ttl`](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/logstash-output.md#_ttl) value on the Agent or Beat to ensure that load is distributed appropriately. @@ -455,7 +455,7 @@ When you use the [Elastic Agent input](https://www.elastic.co/guide/en/logstash/ If you need plugins in addition to those included in the standard {{ls}} distribution, you can add them. 
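On the Beat side, a minimal sketch of that `ttl` setting (the Service hostname is hypothetical, and `ttl` is only honored when `pipelining` is disabled):

```yaml
# filebeat.yml fragment
output.logstash:
  hosts: ["logstash-sample-ls-beats.default.svc:5044"]  # hypothetical Logstash Service
  loadbalance: true
  ttl: 1m          # periodically re-establish connections so new Pods receive load
  pipelining: 0    # ttl requires pipelining to be disabled
```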
Create a custom Docker image that includes the installed plugins, using the `bin/logstash-plugin install` utility to add more plugins to the image so that they can be used by {{ls}} pods. -This sample Dockerfile installs the [`logstash-filter-tld`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-tld.html) plugin to the official {{ls}} Docker image: +This sample Dockerfile installs the [`logstash-filter-tld`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-tld.md) plugin to the official {{ls}} Docker image: ```shell FROM docker.elastic.co/logstash/logstash:8.16.1 diff --git a/deploy-manage/deploy/cloud-on-k8s/map-data.md b/deploy-manage/deploy/cloud-on-k8s/map-data.md index 7e22059b35..2915a9a8c7 100644 --- a/deploy-manage/deploy/cloud-on-k8s/map-data.md +++ b/deploy-manage/deploy/cloud-on-k8s/map-data.md @@ -16,7 +16,7 @@ The Elastic Maps Server Docker image contains only a few zoom levels of data. To ## Basemap download [k8s-maps-basemap-download] -You have to download the basemap ahead of time on a machine that is not air-gapped and populate a volume that can be mounted into the Elastic Maps Server Pods. Check also the [Elastic Maps Server documentation.](https://www.elastic.co/guide/en/kibana/current/maps-connect-to-ems.html#elastic-maps-server) +You have to download the basemap ahead of time on a machine that is not air-gapped and populate a volume that can be mounted into the Elastic Maps Server Pods. Check also the [Elastic Maps Server documentation.](/explore-analyze/visualize/maps/maps-connect-to-ems.md#elastic-maps-server) The procedure on how to get a Kubernetes volume populated with that data is outside the scope of this document, as it depends on your specific Kubernetes setup and choice of volume provisioner. This is a possible approach that works for most setups: diff --git a/deploy-manage/deploy/cloud-on-k8s/node-configuration.md b/deploy-manage/deploy/cloud-on-k8s/node-configuration.md index 68d572bedf..49fa301aab 100644 --- a/deploy-manage/deploy/cloud-on-k8s/node-configuration.md +++ b/deploy-manage/deploy/cloud-on-k8s/node-configuration.md @@ -33,5 +33,5 @@ spec: node.roles: ["data", "ingest", "ml", "transform"] ``` -For more information on Elasticsearch settings, check [Configuring Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html). +For more information on Elasticsearch settings, check [Configuring Elasticsearch](/deploy-manage/deploy/self-managed/configure-elasticsearch.md). diff --git a/deploy-manage/deploy/cloud-on-k8s/nodes-orchestration.md b/deploy-manage/deploy/cloud-on-k8s/nodes-orchestration.md index def6e10a8b..75250f6cd3 100644 --- a/deploy-manage/deploy/cloud-on-k8s/nodes-orchestration.md +++ b/deploy-manage/deploy/cloud-on-k8s/nodes-orchestration.md @@ -127,7 +127,7 @@ Depending on how the NodeSets are updated, ECK handles the Kubernetes resource r * The specification of an existing NodeSet is updated. For example, the Elasticsearch configuration, or the PodTemplate resources requirements. - ECK performs a rolling upgrade of the corresponding Elasticsearch nodes. It follows the [Elasticsearch rolling upgrade best practices](https://www.elastic.co/guide/en/elastic-stack/current/upgrading-elasticsearch.html) to update the underlying Pods while maintaining the availability of the Elasticsearch cluster where possible. In most cases, the process simply involves restarting Elasticsearch nodes one-by-one. 
Note that some cluster topologies may be impossible to deploy without making the cluster unavailable (check [Limitations](#k8s-orchestration-limitations) ).
+    ECK performs a rolling upgrade of the corresponding Elasticsearch nodes. It follows the [Elasticsearch rolling upgrade best practices](/deploy-manage/upgrade/deployment-or-cluster.md) to update the underlying Pods while maintaining the availability of the Elasticsearch cluster where possible. In most cases, the process simply involves restarting Elasticsearch nodes one-by-one. Note that some cluster topologies may be impossible to deploy without making the cluster unavailable (check [Limitations](#k8s-orchestration-limitations)).

* An existing NodeSet is renamed.

@@ -152,7 +152,7 @@ Due to relying on Kubernetes primitives such as StatefulSets, the ECK orchestrat

* Clusters containing indices with no replicas

-If an {{es}} node holds the only copy of a shard, this shard becomes unavailable while the node is upgraded. To ensure [high availability](https://www.elastic.co/guide/en/elasticsearch/reference/current/high-availability-cluster-design.html) it is recommended to configure clusters with three master nodes, more than one node per [data tier](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html) and at least one replica per index.
+If an {{es}} node holds the only copy of a shard, this shard becomes unavailable while the node is upgraded. To ensure [high availability](/deploy-manage/production-guidance/availability-and-resilience.md), it is recommended to configure clusters with three master nodes, more than one node per [data tier](/manage-data/lifecycle/data-tiers.md), and at least one replica per index. A minimal topology sketch follows at the end of this section.

* Elasticsearch Pods may stay `Pending` during a rolling upgrade if the Kubernetes scheduler cannot re-schedule them back. This is especially important when using local PersistentVolumes. If the Kubernetes node bound to a local PersistentVolume does not have enough capacity to host an upgraded Pod which was temporarily removed, that Pod will stay `Pending`.
* Rolling upgrades can only make progress if the Elasticsearch cluster health is green. There are exceptions to this rule if the cluster health is yellow and if the following conditions are satisfied:

@@ -176,7 +176,7 @@ Advanced users may force an upgrade by manually deleting Pods themselves. The de

Operations that reduce the number of nodes in the cluster cannot make progress without user intervention, if the Elasticsearch index replica settings are incompatible with the intended downscale. Specifically, if the Elasticsearch index settings demand a higher number of shard copies than data nodes in the cluster after the downscale operation, ECK cannot migrate the data away from the node about to be removed. You can address this in the following ways:

* Adjust the Elasticsearch [index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) to a number of replicas that allow the desired node removal.
-* Use [`auto_expand_replicas`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-settings) to automatically adjust the replicas to the number of data nodes in the cluster.
+* Use [`auto_expand_replicas`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#dynamic-index-settings) to automatically adjust the replicas to the number of data nodes in the cluster.
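As a concrete reference for the resilience guidance above, the following minimal sketch (name, version, and node counts are illustrative assumptions) runs three dedicated master nodes and more than one data node, so each index can keep at least one replica allocated while Pods restart or nodes are removed:

```yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: ha-example
spec:
  version: 8.16.1
  nodeSets:
    - name: master
      count: 3 # three dedicated master-eligible nodes
      config:
        node.roles: ["master"]
    - name: data
      count: 2 # more than one node per data tier, so replicas have somewhere to go
      config:
        node.roles: ["data", "ingest"]
```

Pair a layout like this with at least one replica per index so that rolling upgrades and downscales never take the only copy of a shard offline.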
## Advanced control during rolling upgrades [k8s-advanced-upgrade-control]

diff --git a/deploy-manage/deploy/cloud-on-k8s/quickstart-fleet.md b/deploy-manage/deploy/cloud-on-k8s/quickstart-fleet.md
index 26b4423ee7..4d64c41a65 100644
--- a/deploy-manage/deploy/cloud-on-k8s/quickstart-fleet.md
+++ b/deploy-manage/deploy/cloud-on-k8s/quickstart-fleet.md
@@ -213,5 +213,5 @@ ECK automatically configures secure connections between all components. {{fleet}
kubectl logs -f elastic-agent-quickstart-agent-xbcxr
```

-4. Configure the policy used by {{agents}}. Check [{{agent}} policies](https://www.elastic.co/guide/en/fleet/current/agent-policy.html) for more details.
+4. Configure the policy used by {{agents}}. Check [{{agent}} policies](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/agent-policy.md) for more details.

diff --git a/deploy-manage/deploy/cloud-on-k8s/readiness-probe.md b/deploy-manage/deploy/cloud-on-k8s/readiness-probe.md
index ec7ef13f50..760f12c064 100644
--- a/deploy-manage/deploy/cloud-on-k8s/readiness-probe.md
+++ b/deploy-manage/deploy/cloud-on-k8s/readiness-probe.md
@@ -45,6 +45,6 @@ Note that this requires restarting the Pods.

% this feature might have disappeared, we will need to investigate this a bit more, as the link below doesn't work anymore but it does for 8.15 for example.

-We do not recommend overriding the default readiness probe on Elasticsearch 8.2.0 and later. ECK configures a socket based readiness probe using the Elasticsearch [readiness port feature](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#readiness-tcp-port) which is not influenced by the load on the Elasticsearch cluster.
+We do not recommend overriding the default readiness probe on Elasticsearch 8.2.0 and later. ECK configures a socket-based readiness probe using the Elasticsearch [readiness port feature](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#readiness-tcp-port), which is not influenced by the load on the Elasticsearch cluster.

diff --git a/deploy-manage/deploy/cloud-on-k8s/recipes.md b/deploy-manage/deploy/cloud-on-k8s/recipes.md
index 6d546cab13..d4fb87b4bd 100644
--- a/deploy-manage/deploy/cloud-on-k8s/recipes.md
+++ b/deploy-manage/deploy/cloud-on-k8s/recipes.md
@@ -18,7 +18,7 @@ This section includes recipes that provide configuration examples for some commo
* [Deploy Elasticsearch, Kibana, Elastic Fleet Server and Elastic Agent within GKE Autopilot](https://github.com/elastic/cloud-on-k8s/tree/main/config/recipes/autopilot)

::::{warning}
-Compared to other configuration examples that are consistently tested, like [fleet-managed Elastic Agent on ECK](configuration-examples-fleet.md), [standalone Elastic Agent on ECK](configuration-examples-standalone.md), or [Beats on ECK](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-beat-configuration-examples.html), the recipes in this section are not regularly tested by our automation system, and therefore should not be considered to be production-ready.
+Compared to other configuration examples that are consistently tested, like [fleet-managed Elastic Agent on ECK](configuration-examples-fleet.md), [standalone Elastic Agent on ECK](configuration-examples-standalone.md), or [Beats on ECK](/deploy-manage/deploy/cloud-on-k8s/configuration-examples-beats.md), the recipes in this section are not regularly tested by our automation system, and therefore should not be considered production-ready.
::::

diff --git a/deploy-manage/deploy/cloud-on-k8s/requests-routing-to-elasticsearch-nodes.md b/deploy-manage/deploy/cloud-on-k8s/requests-routing-to-elasticsearch-nodes.md
index 79cc20359c..8a1e6becd7 100644
--- a/deploy-manage/deploy/cloud-on-k8s/requests-routing-to-elasticsearch-nodes.md
+++ b/deploy-manage/deploy/cloud-on-k8s/requests-routing-to-elasticsearch-nodes.md
@@ -7,7 +7,7 @@ mapped_pages:

# Requests routing to Elasticsearch nodes [k8s-traffic-splitting]

-The default Kubernetes service created by ECK, named `-es-http`, is configured to include all the Elasticsearch nodes in that cluster. This configuration is good to get started and is adequate for most use cases. However, if you are operating an Elasticsearch cluster with [different node types](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) and want control over which nodes handle which types of traffic, you should create additional Kubernetes services yourself.
+The default Kubernetes service created by ECK, named `-es-http`, is configured to include all the Elasticsearch nodes in that cluster. This configuration is good to get started and is adequate for most use cases. However, if you are operating an Elasticsearch cluster with [different node types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md) and want control over which nodes handle which types of traffic, you should create additional Kubernetes services yourself.

As an alternative, you can use features provided by third-party software such as service meshes and ingress controllers to achieve more advanced traffic management configurations. Check the [recipes directory](https://github.com/elastic/cloud-on-k8s/tree/2.16/config/recipes) in the ECK source repository for a few examples.

diff --git a/deploy-manage/deploy/cloud-on-k8s/required-rbac-permissions.md b/deploy-manage/deploy/cloud-on-k8s/required-rbac-permissions.md
index 6ff4dceb6a..d13ec10703 100644
--- a/deploy-manage/deploy/cloud-on-k8s/required-rbac-permissions.md
+++ b/deploy-manage/deploy/cloud-on-k8s/required-rbac-permissions.md
@@ -36,7 +36,7 @@ These permissions are required to install the ECK operator in a Kubernetes clust
| `RoleBinding or ClusterRoleBinding` | `rbac.authorization.k8s.io` | no | Binding between the operators role and the operators service account. Depending on the installation type (global/restricted), either global (ClusterRoleBinding) or namespaced (RoleBinding) resource is needed. |
| `ConfigMap` | `core` | yes | Configuration parameters of the Operator. They can be specified directly in the StatefulSet (or Deployment) resource instead. |
| `Namespace` | `core` | yes | Namespace where the operator will run. It can be a pre-existing namespace as well. |
-| `ValidatingWebhookConfiguration` | `admissionregistration.k8s.io` | yes | Validating webhook installation. It provides fast feedback for the user directly as a APIServer response. A subset of these validations is also run by the operator itself, but the results are only available through operator logs and Kubernetes events. Check [docs](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-webhook.html) for more. |
+| `ValidatingWebhookConfiguration` | `admissionregistration.k8s.io` | yes | Validating webhook installation. It provides fast feedback for the user directly as an APIServer response.
A subset of these validations is also run by the operator itself, but the results are only available through operator logs and Kubernetes events. Check [docs](/deploy-manage/deploy/cloud-on-k8s/configure-validating-webhook.md) for more. | | `Secret` | `core` | yes | Secret containing the validating webhook’s endpoint CA certificate. | | `Service` | `core` | yes | Service for validating webhook endpoint. | @@ -59,9 +59,9 @@ These permissions are needed by the Service Account that ECK operator runs as. | `StatefulSet` | `apps` | no | Deploying Elasticsearch | | `Deployment` | `apps` | no | Deploying Kibana, APM Server, EnterpriseSearch, Maps, Beats or Elastic Agent. | | `DaemonSet` | `apps` | no | Deploying Beats or Elastic Agent. | -| `PodDisruptionBudget` | `policy` | no | Ensuring update safety for Elasticsearch. Check [docs](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-pod-disruption-budget.html) to learn more. | +| `PodDisruptionBudget` | `policy` | no | Ensuring update safety for Elasticsearch. Check [docs](/deploy-manage/deploy/cloud-on-k8s/pod-disruption-budget.md) to learn more. | | `StorageClass` | `storage.k8s.io` | yes | Validating storage expansion support. Check [docs](volume-claim-templates.md#k8s-volume-claim-templates-update) to learn more. | -| `coreauthorization.k8s.io` | `SubjectAccessReview` | yes | Controlling access between referenced resources. Check [docs](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-restrict-cross-namespace-associations.html) to learn more. | +| `coreauthorization.k8s.io` | `SubjectAccessReview` | yes | Controlling access between referenced resources. Check [docs](/deploy-manage/deploy/cloud-on-k8s/restrict-cross-namespace-resource-associations.md) to learn more. | And all permissions that the [Using ECK-managed resources](#k8s-eck-permissions-using) chapter specifies. diff --git a/deploy-manage/deploy/cloud-on-k8s/securing-logstash-api.md b/deploy-manage/deploy/cloud-on-k8s/securing-logstash-api.md index 2de0724b61..37e462c384 100644 --- a/deploy-manage/deploy/cloud-on-k8s/securing-logstash-api.md +++ b/deploy-manage/deploy/cloud-on-k8s/securing-logstash-api.md @@ -44,7 +44,7 @@ spec: 1. Store the username and password in a Secret. 2. Map the username and password to the environment variables of the Pod. -3. At Logstash startup, `${API_USERNAME}` and `${API_PASSWORD}` are replaced by the value of environment variables. Check [using environment variables](https://www.elastic.co/guide/en/logstash/current/environment-variables.html) for more details. +3. At Logstash startup, `${API_USERNAME}` and `${API_PASSWORD}` are replaced by the value of environment variables. Check [using environment variables](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/environment-variables.md) for more details. An alternative is to set up [keystore](advanced-configuration-logstash.md#k8s-logstash-keystore) to resolve `${API_USERNAME}` and `${API_PASSWORD}` diff --git a/deploy-manage/deploy/cloud-on-k8s/standalone-elastic-agent.md b/deploy-manage/deploy/cloud-on-k8s/standalone-elastic-agent.md index dfccd861ef..7806e64692 100644 --- a/deploy-manage/deploy/cloud-on-k8s/standalone-elastic-agent.md +++ b/deploy-manage/deploy/cloud-on-k8s/standalone-elastic-agent.md @@ -7,7 +7,7 @@ mapped_pages: # Standalone Elastic Agent [k8s-elastic-agent] -This section describes how to configure and deploy Elastic Agent in [standalone mode](https://www.elastic.co/guide/en/fleet/current/install-standalone-elastic-agent.html) with ECK. 
Check the [Fleet section](fleet-managed-elastic-agent.md) if you want to manage your Elastic Agents with [Fleet](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html). +This section describes how to configure and deploy Elastic Agent in [standalone mode](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-standalone-elastic-agent.md) with ECK. Check the [Fleet section](fleet-managed-elastic-agent.md) if you want to manage your Elastic Agents with [Fleet](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md). * [Quickstart](quickstart-standalone.md) * [Configuration](configuration-standalone.md) diff --git a/deploy-manage/deploy/cloud-on-k8s/transport-settings.md b/deploy-manage/deploy/cloud-on-k8s/transport-settings.md index 37ff1f3ba3..99dbfc4834 100644 --- a/deploy-manage/deploy/cloud-on-k8s/transport-settings.md +++ b/deploy-manage/deploy/cloud-on-k8s/transport-settings.md @@ -7,7 +7,7 @@ mapped_pages: # Transport settings [k8s-transport-settings] -The transport module in Elasticsearch is used for internal communication between nodes within the cluster as well as communication between remote clusters. Check the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html) for details. For customization options of the HTTP layer, check [Services](accessing-services.md) and [TLS certificates](/deploy-manage/security/secure-http-communications.md). +The transport module in Elasticsearch is used for internal communication between nodes within the cluster as well as communication between remote clusters. Check the [Elasticsearch documentation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md) for details. For customization options of the HTTP layer, check [Services](accessing-services.md) and [TLS certificates](/deploy-manage/security/secure-http-communications.md). ## Customize the Transport Service [k8s_customize_the_transport_service] diff --git a/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md b/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md index b395f520e1..3380fd1eab 100644 --- a/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md +++ b/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md @@ -14,7 +14,7 @@ When `kibanaRef` is specified, Beat tries to connect to the Kibana instance. If ## Configuration containing key: null is malformed [k8s-beat-configuration-containing-key-null-is-malformed] -When `kubectl` is used to modify a resource, it calculates the diff between the user applied and the existing configuration. This diff has special [semantics](https://tools.ietf.org/html/rfc7396#section-1) that forces the removal of keys if they have special values. For example, if the user-applied configuration contains `some_key: null` (or equivalent `some_key: ~`), this is interpreted as an instruction to remove `some_key`. In Beats configurations, this is often a problem when it comes to defining things like [processors](https://www.elastic.co/guide/en/beats/filebeat/current/add-cloud-metadata.html). To avoid this problem: +When `kubectl` is used to modify a resource, it calculates the diff between the user applied and the existing configuration. This diff has special [semantics](https://tools.ietf.org/html/rfc7396#section-1) that forces the removal of keys if they have special values. 
For example, if the user-applied configuration contains `some_key: null` (or equivalent `some_key: ~`), this is interpreted as an instruction to remove `some_key`. In Beats configurations, this is often a problem when it comes to defining things like [processors](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/add-cloud-metadata.md). To avoid this problem: * Use `some_key: {}` (empty map) or `some_key: []` (empty array) instead of `some_key: null` if doing so does not affect the behaviour. This might not be possible in all cases as some applications distinguish between null values and empty values and behave differently. * Instead of using `config` to define configuration inline, use `configRef` and store the configuration in a Secret. @@ -28,6 +28,6 @@ If you have configured a Beat to run as a `Deployment` and you are using a `host ERROR instance/beat.go:958 Exiting: data path already locked by another beat. Please make sure that multiple beats are not sharing the same data path (path.data). ``` -This can happen if the new Pod is scheduled on the same Kubernetes node as the old Pod and is now trying to use the same data directory. Use a [`Recreate`](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-beat-configuration.html#k8s-beat-chose-the-deployment-model) deployment strategy to avoid this problem. +This can happen if the new Pod is scheduled on the same Kubernetes node as the old Pod and is now trying to use the same data directory. Use a [`Recreate`](/deploy-manage/deploy/cloud-on-k8s/configuration-beats.md#k8s-beat-chose-the-deployment-model) deployment strategy to avoid this problem. diff --git a/deploy-manage/deploy/cloud-on-k8s/virtual-memory.md b/deploy-manage/deploy/cloud-on-k8s/virtual-memory.md index b8b11dffb8..83206c8c02 100644 --- a/deploy-manage/deploy/cloud-on-k8s/virtual-memory.md +++ b/deploy-manage/deploy/cloud-on-k8s/virtual-memory.md @@ -11,9 +11,9 @@ By default, Elasticsearch uses memory mapping (`mmap`) to efficiently access ind The kernel setting `vm.max_map_count=262144` can be set on the host directly, by a dedicated init container which must be privileged, or a dedicated Daemonset. -For more information, check the Elasticsearch documentation on [Virtual memory](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html). +For more information, check the Elasticsearch documentation on [Virtual memory](/deploy-manage/deploy/self-managed/vm-max-map-count.md). -Optionally, you can select a different type of file system implementation for the storage. For possible options, check the [store module documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html). +Optionally, you can select a different type of file system implementation for the storage. For possible options, check the [store module documentation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-store-settings.md). 
```yaml
spec:
diff --git a/deploy-manage/deploy/cloud-on-k8s/volume-claim-templates.md b/deploy-manage/deploy/cloud-on-k8s/volume-claim-templates.md
index fa18192283..635452c03c 100644
--- a/deploy-manage/deploy/cloud-on-k8s/volume-claim-templates.md
+++ b/deploy-manage/deploy/cloud-on-k8s/volume-claim-templates.md
@@ -13,7 +13,7 @@ mapped_pages:

By default, the operator creates a [`PersistentVolumeClaim`](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) with a capacity of 1Gi for each pod in an Elasticsearch cluster to prevent data loss in case of accidental pod deletion. For production workloads, you should define your own volume claim template with the desired storage capacity and (optionally) the Kubernetes [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/) to associate with the persistent volume.

::::{important}
-The name of the volume claim must always be `elasticsearch-data`. If you chose a different name you have to set up a corresponding volume mount matching the [data.path](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#path-settings) yourself ( `/usr/share/elasticsearch/data` by default).
+The name of the volume claim must always be `elasticsearch-data`. If you choose a different name, you have to set up a corresponding volume mount matching the [path.data](/deploy-manage/deploy/self-managed/important-settings-configuration.md#path-settings) setting yourself (`/usr/share/elasticsearch/data` by default).
::::

diff --git a/deploy-manage/deploy/elastic-cloud.md b/deploy-manage/deploy/elastic-cloud.md
index f84e351fb2..3b2ceab150 100644
--- a/deploy-manage/deploy/elastic-cloud.md
+++ b/deploy-manage/deploy/elastic-cloud.md
@@ -13,10 +13,10 @@ Serverless projects use the core components of the {{stack}}, such as {{es}} and

Elastic provides three serverless solutions available on {{ecloud}}:

-* **https://www.elastic.co/guide/en/serverless/current/what-is-elasticsearch-serverless.html[{{es-serverless}}]**: Build powerful applications and search experiences using a rich ecosystem of vector search capabilities, APIs, and libraries.
+* **[{{es-serverless}}](/solutions/search.md)**: Build powerful applications and search experiences using a rich ecosystem of vector search capabilities, APIs, and libraries.
% See solutions/search/serverless-elasticsearch-get-started.md
-* **https://www.elastic.co/guide/en/serverless/current/what-is-observability-serverless.html[{{obs-serverless}}]**: Monitor your own platforms and services using powerful machine learning and analytics tools with your logs, metrics, traces, and APM data.
-* **https://www.elastic.co/guide/en/serverless/current/what-is-security-serverless.html[{{sec-serverless}}]**: Detect, investigate, and respond to threats with SIEM, endpoint protection, and AI-powered analytics capabilities.
+* **[{{obs-serverless}}](/solutions/observability.md)**: Monitor your own platforms and services using powerful machine learning and analytics tools with your logs, metrics, traces, and APM data.
+* **[{{sec-serverless}}](/solutions/security/elastic-security-serverless.md)**: Detect, investigate, and respond to threats with SIEM, endpoint protection, and AI-powered analytics capabilities.

[Learn more about {{serverless-full}} in our blog](https://www.elastic.co/blog/elastic-cloud-serverless).
@@ -38,7 +38,7 @@ Elastic provides three serverless solutions available on {{ecloud}}: ## Differences between serverless projects and hosted deployments on {{ecloud}} [general-what-is-serverless-elastic-differences-between-serverless-projects-and-hosted-deployments-on-ecloud] -You can run [hosted deployments](https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html) of the {{stack}} on {{ecloud}}. These hosted deployments provide more provisioning and advanced configuration options. +You can run [hosted deployments](/deploy-manage/deploy/elastic-cloud/cloud-hosted.md) of the {{stack}} on {{ecloud}}. These hosted deployments provide more provisioning and advanced configuration options. | | | | | --- | --- | --- | @@ -63,7 +63,7 @@ Migration paths between hosted deployments and serverless projects are currently **How can I move data to or from serverless projects?** -We are working on data migration tools! In the interim, [use Logstash](https://www.elastic.co/guide/en/serverless/current/elasticsearch-ingest-data-through-logstash.html) with Elasticsearch input and output plugins to move data to and from serverless projects. +We are working on data migration tools! In the interim, [use Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md) with Elasticsearch input and output plugins to move data to and from serverless projects. **How does serverless ensure compatibility between software versions?** diff --git a/deploy-manage/deploy/elastic-cloud/add-plugins-extensions.md b/deploy-manage/deploy/elastic-cloud/add-plugins-extensions.md index 4845bc8fc6..a7d6e1c0a9 100644 --- a/deploy-manage/deploy/elastic-cloud/add-plugins-extensions.md +++ b/deploy-manage/deploy/elastic-cloud/add-plugins-extensions.md @@ -11,14 +11,14 @@ Plugins extend the core functionality of {{es}}. There are many suitable plugins * Analysis plugins, to provide analyzers targeted at languages other than English. * Scripting plugins, to provide additional scripting languages. -Plugins can come from different sources: the official ones created or at least maintained by Elastic, community-sourced plugins from other users, and plugins that you provide. Some of the official plugins are always provided with our service, and can be [enabled per deployment](https://www.elastic.co/guide/en/cloud/current/ec-adding-elastic-plugins.html). +Plugins can come from different sources: the official ones created or at least maintained by Elastic, community-sourced plugins from other users, and plugins that you provide. Some of the official plugins are always provided with our service, and can be [enabled per deployment](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/cloud/ec-adding-elastic-plugins.md). There are two ways to add plugins to a deployment in Elasticsearch Service: * [Enable one of the official plugins already available in Elasticsearch Service](https://www.elastic.co/guide/en/cloud/current/ec-adding-elastic-plugins.html). * [Upload a custom plugin and then enable it per deployment](upload-custom-plugins-bundles.md). -Custom plugins can include the official {{es}} plugins not provided with Elasticsearch Service, any of the community-sourced plugins, or [plugins that you write yourself](https://www.elastic.co/guide/en/elasticsearch/plugins/current/plugin-authors.html). Uploading custom plugins is available only to Gold, Platinum, and Enterprise subscriptions. For more information, check [Upload custom plugins and bundles](upload-custom-plugins-bundles.md). 
+Custom plugins can include the official {{es}} plugins not provided with Elasticsearch Service, any of the community-sourced plugins, or [plugins that you write yourself](asciidocalypse://docs/elasticsearch/docs/extend/create-elasticsearch-plugins/index.md). Uploading custom plugins is available only to Gold, Platinum, and Enterprise subscriptions. For more information, check [Upload custom plugins and bundles](upload-custom-plugins-bundles.md). To learn more about the official and community-sourced plugins, refer to [{{es}} Plugins and Integrations](https://www.elastic.co/guide/en/elasticsearch/plugins/current/index.html). diff --git a/deploy-manage/deploy/elastic-cloud/available-stack-versions.md b/deploy-manage/deploy/elastic-cloud/available-stack-versions.md index 570978c891..d8126081e3 100644 --- a/deploy-manage/deploy/elastic-cloud/available-stack-versions.md +++ b/deploy-manage/deploy/elastic-cloud/available-stack-versions.md @@ -25,7 +25,7 @@ You might sometimes notice additional versions listed in the user interface beyo Whenever a new Elastic Stack version is released, we do our best to provide the new version on our hosted service at the same time. We send you an email and add a notice to the console, recommending an upgrade. You’ll need to decide whether to upgrade to the new version with new features and bug fixes or to stay with a version you know works for you a while longer. -There can be [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes.html) in some new versions of Elasticsearch that break what used to work in older versions. Before upgrading, you’ll want to check if the new version introduces any changes that might affect your applications. A breaking change might be a function that was previously deprecated and that has been removed in the latest version, for example. If you have an application that depends on the removed function, the application will need to be updated to continue working with the new version of Elasticsearch. +There can be [breaking changes](asciidocalypse://docs/elasticsearch/docs/release-notes/breaking-changes/elasticsearch.md) in some new versions of Elasticsearch that break what used to work in older versions. Before upgrading, you’ll want to check if the new version introduces any changes that might affect your applications. A breaking change might be a function that was previously deprecated and that has been removed in the latest version, for example. If you have an application that depends on the removed function, the application will need to be updated to continue working with the new version of Elasticsearch. To learn more about upgrading to newer versions of the Elastic Stack on our hosted service, check [Upgrade Versions](../../upgrade/deployment-or-cluster.md). diff --git a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md index d9a5d6f2bd..f25bad63bb 100644 --- a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md +++ b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md @@ -139,7 +139,7 @@ $$$azure-integration-pricing$$$What is the pricing for this offer? $$$azure-integration-regions$$$Which Azure regions are supported? -: Here is the [list of available Azure regions](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html#ec-azure_regions) supported in {{ecloud}}. 
+: Here is the [list of available Azure regions](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md#ec-azure_regions) supported in {{ecloud}}. $$$azure-integration-subscription-levels$$$Which {{ecloud}} subscription levels are available? : The subscription defaults to the Enterprise subscription, granting immediate access to advanced {{stack}} features like machine learning, and premium support response time SLAs. {{ecloud}} offers a number of different [subscription levels](https://elastic.co/pricing). @@ -236,7 +236,7 @@ $$$azure-integration-cli-api$$$What other methods are available to deploy {{es}} * The {{ecloud}} [console](https://cloud.elastic.co?page=docs&placement=docs-body) * The {{ecloud}} [REST API](https://www.elastic.co/guide/en/cloud/current/ec-restful-api.html) - * The {{ecloud}} [command line tool](https://www.elastic.co/guide/en/ecctl/current/index.html) + * The {{ecloud}} [command line tool](asciidocalypse://docs/ecctl/docs/reference/cloud/ecctl/index.md) * The {{ecloud}} [Terraform provider](https://registry.terraform.io/providers/elastic/ec/latest/docs) Note that when you use any of the {{ecloud}} methods, the {{es}} deployment will not be available in Azure. @@ -426,7 +426,7 @@ $$$azure-integration-vm-extensions$$$How can I monitor my Azure virtual machines **Managing the Elastic Agent VM extension** - Once installed on the virtual machine, you can manage Elastic Agent either from Fleet or locally on the host where it’s installed. We recommend managing the VM extension through Fleet, because it makes handling and upgrading the agents considerably easier. For more information on the Elastic Agent, check [Manage your Elastic Agents](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html). + Once installed on the virtual machine, you can manage Elastic Agent either from Fleet or locally on the host where it’s installed. We recommend managing the VM extension through Fleet, because it makes handling and upgrading the agents considerably easier. For more information on the Elastic Agent, check [Manage your Elastic Agents](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md). **Operating system compatibility matrix** diff --git a/deploy-manage/deploy/elastic-cloud/change-hardware.md b/deploy-manage/deploy/elastic-cloud/change-hardware.md index 85ed6fb9ff..d605439d2f 100644 --- a/deploy-manage/deploy/elastic-cloud/change-hardware.md +++ b/deploy-manage/deploy/elastic-cloud/change-hardware.md @@ -5,7 +5,7 @@ mapped_pages: # Change hardware [ec-change-hardware-for-a-specific-resource] -The virtual hardware on which Elastic stack deployments run is defined by instance configurations. To learn more about what an instance configuration is, refer to [Instance configurations](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html#ec-getting-started-configurations). +The virtual hardware on which Elastic stack deployments run is defined by instance configurations. To learn more about what an instance configuration is, refer to [Instance configurations](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md#ec-getting-started-configurations). When a deployment is created, each Elasticsearch tier and stateless resource (e.g., Kibana) gets an instance configuration assigned to it, based on the hardware profile used. 
The combination of instance configurations defined within each hardware profile is designed to provide the best possible outcome for each use case. Therefore, it is not advisable to use instance configurations that are not specified on the hardware profile, except in specific situations in which we may need to migrate an Elasticsearch tier or stateless resource to a different hardware type. An example of such a scenario is when a cloud provider stops supporting a hardware type in a specific region. @@ -20,7 +20,7 @@ Prerequisites: Follow these steps to migrate to a different instance configuration, replacing the default `$EC_API_KEY` value with your actual API key: -1. From the [list of instance configurations available for each region](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html), select the target instance configuration you want to migrate to. +1. From the [list of instance configurations available for each region](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md), select the target instance configuration you want to migrate to. 2. Get the deployment update payload from the [Elasticsearch Service Console](https://cloud.elastic.co?page=docs&placement=docs-body) **Edit** page, by selecting **Equivalent API request**, and store it in a file called `migrate_instance_configuration.json`. Example payload containing relevant data for migrating the hot Elasticsearch tier: diff --git a/deploy-manage/deploy/elastic-cloud/create-an-elastic-cloud-hosted-deployment.md b/deploy-manage/deploy/elastic-cloud/create-an-elastic-cloud-hosted-deployment.md index 189e45bff7..700e29d2ca 100644 --- a/deploy-manage/deploy/elastic-cloud/create-an-elastic-cloud-hosted-deployment.md +++ b/deploy-manage/deploy/elastic-cloud/create-an-elastic-cloud-hosted-deployment.md @@ -33,10 +33,10 @@ Once you are on the **Create deployment** page, you can create the deployment wi : The cloud platform where you’ll deploy your deployment. We support: Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure. You do not need to provide your own keys. Region - : The cloud platform’s region your deployment will live. If you have compliance or latency requirements, you can create your deployment in any of our [supported regions](https://www.elastic.co/guide/en/cloud/current/ec-reference-regions.html). The region should be as close as possible to the location of your data. + : The cloud platform’s region your deployment will live. If you have compliance or latency requirements, you can create your deployment in any of our [supported regions](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/regions.md). The region should be as close as possible to the location of your data. Hardware profile - : This allows you to configure the underlying virtual hardware that you’ll deploy your Elastic Stack on. Each hardware profile provides a unique blend of storage, RAM and vCPU sizes. You can select a hardware profile that’s best suited for your use case. For example CPU Optimized if you have a search-heavy use case that’s bound by compute resources. For more details, check the [hardware profiles](ec-configure-deployment-settings.md#ec-hardware-profiles) section. You can also view the [virtual hardware details](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html) which powers hardware profiles. With the **Advanced settings** option, you can configure the underlying virtual hardware associated with each profile. 
+ : This allows you to configure the underlying virtual hardware that you’ll deploy your Elastic Stack on. Each hardware profile provides a unique blend of storage, RAM, and vCPU sizes. You can select a hardware profile that’s best suited for your use case. For example, choose CPU Optimized if you have a search-heavy use case that’s bound by compute resources. For more details, check the [hardware profiles](ec-configure-deployment-settings.md#ec-hardware-profiles) section. You can also view the [virtual hardware details](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md) that power hardware profiles. With the **Advanced settings** option, you can configure the underlying virtual hardware associated with each profile.

Version
: The Elastic Stack version that will get deployed. Defaults to the latest version. Our [version policy](available-stack-versions.md) describes which versions are available to deploy.

diff --git a/deploy-manage/deploy/elastic-cloud/differences-from-other-elasticsearch-offerings.md b/deploy-manage/deploy/elastic-cloud/differences-from-other-elasticsearch-offerings.md
index 830efce08a..314159e307 100644
--- a/deploy-manage/deploy/elastic-cloud/differences-from-other-elasticsearch-offerings.md
+++ b/deploy-manage/deploy/elastic-cloud/differences-from-other-elasticsearch-offerings.md
@@ -40,7 +40,7 @@ To ensure optimal performance, follow these recommendations for sizing individua

For large datasets that exceed the recommended maximum size for a single index, consider splitting your data across smaller indices and using an alias to search them collectively.

-These recommendations do not apply to indices using better binary quantization (BBQ). Refer to [vector quantization](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-quantization) in the core {{es}} docs for more information.
+These recommendations do not apply to indices using better binary quantization (BBQ). Refer to [vector quantization](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md#dense-vector-quantization) in the core {{es}} docs for more information.

## API availability [elasticsearch-differences-serverless-apis-availability]

@@ -92,7 +92,7 @@ When attempting to use an unavailable API, you’ll receive a clear error message

## Settings availability [elasticsearch-differences-serverless-settings-availability]

-In {{es-serverless}}, you can only configure [index-level settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-modules-settings). Cluster-level settings and node-level settings are not required by end users and the `elasticsearch.yml` file is fully managed by Elastic.
+In {{es-serverless}}, you can only configure [index-level settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-modules-settings). Cluster-level settings and node-level settings are not required by end users and the `elasticsearch.yml` file is fully managed by Elastic.

Available settings
: **Index-level settings**: Settings that control how {{es}} documents are processed, stored, and searched are available to end users. These include:

@@ -156,7 +156,7 @@ The following {{es-serverless}} project-specific features are planned for future

* Managed Search connectors

- You can use [self-managed Search connectors](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-build-connector.html) in the meantime.
+ You can use [self-managed Search connectors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/search-connectors/self-managed-connectors.md) in the meantime. @@ -165,5 +165,5 @@ The following {{es-serverless}} project-specific features are planned for future The following features are not available in {{es-serverless}} and are not planned for future support: * [Custom plugins and bundles](https://www.elastic.co/guide/en/cloud/current/ec-custom-bundles.html) -* [{{es}} for Apache Hadoop](https://www.elastic.co/guide/en/elasticsearch/hadoop/current/reference.html) -* [Scripted metric aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html) +* [{{es}} for Apache Hadoop](asciidocalypse://docs/elasticsearch-hadoop/docs/reference/ingestion-tools/elasticsearch-hadoop/elasticsearch-for-apache-hadoop.md) +* [Scripted metric aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) diff --git a/deploy-manage/deploy/elastic-cloud/ec-change-hardware-profile.md b/deploy-manage/deploy/elastic-cloud/ec-change-hardware-profile.md index 28e198d3d6..3cb929aacf 100644 --- a/deploy-manage/deploy/elastic-cloud/ec-change-hardware-profile.md +++ b/deploy-manage/deploy/elastic-cloud/ec-change-hardware-profile.md @@ -115,7 +115,7 @@ Replace those values with your actual API key and deployment ID in the following "region":"gcp-us-central1", ``` -3. Check the [hardware profiles available](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html) for the region that your deployment is in and find the template ID of the deployment hardware profile you’d like to use. +3. Check the [hardware profiles available](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md) for the region that your deployment is in and find the template ID of the deployment hardware profile you’d like to use. ::::{tip} If you wish to update your hardware profile to the latest version available for that same profile, locate the template ID corresponding to the `deployment_template` you retrieved at step 2, but without the version information. For example, if your deployment’s current hardware profile is `gcp-cpu-optimized-v5`, use `gcp-cpu-optimized` as a template ID to update your deployment. @@ -143,7 +143,7 @@ Replace those values with your actual API key and deployment ID in the following ### Storage optimized [ec-profiles-storage] -Your Elasticsearch data nodes are optimized for high I/O throughput. Use this profile if you are new to Elasticsearch or don’t need to run a more specialized workload. You can find the exact storage, memory, and vCPU allotment on the [hardware details page](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html#ec-getting-started-configurations) for each cloud provider. +Your Elasticsearch data nodes are optimized for high I/O throughput. Use this profile if you are new to Elasticsearch or don’t need to run a more specialized workload. You can find the exact storage, memory, and vCPU allotment on the [hardware details page](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md#ec-getting-started-configurations) for each cloud provider. 
**Ideal use case**

diff --git a/deploy-manage/deploy/elastic-cloud/ec-configure-deployment-settings.md b/deploy-manage/deploy/elastic-cloud/ec-configure-deployment-settings.md
index 6675c4e493..362f782d14 100644
--- a/deploy-manage/deploy/elastic-cloud/ec-configure-deployment-settings.md
+++ b/deploy-manage/deploy/elastic-cloud/ec-configure-deployment-settings.md
@@ -27,7 +27,7 @@ You can select your cloud platform and region only when you create a new deploym

Elastic Cloud deploys Elastic Stack components into a *hardware profile* which provides a unique blend of storage, memory and vCPU. This gives you more flexibility to choose the hardware profile that best fits for your use case. For example, *Compute Optimized* deploys Elasticsearch on virtual hardware that provides high [vCPU](../../monitor/monitoring-data/ec-vcpu-boost-instance.md) which can help search-heavy use cases return queries quickly.

-Under the covers, hardware profiles leverage virtualized instances from a cloud provider, such as Amazon Web Services, Google Compute Platform, and Microsoft Azure. You don’t interact with the cloud provider directly, but we do document what we use for your reference. To learn more, check [Elasticsearch Service Hardware](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html).
+Under the covers, hardware profiles leverage virtualized instances from a cloud provider, such as Amazon Web Services, Google Cloud Platform, and Microsoft Azure. You don’t interact with the cloud provider directly, but we do document what we use for your reference. To learn more, check [Elasticsearch Service Hardware](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md).

The components of the Elastic Stack that we support as part of a deployment are called *instances* and include:

diff --git a/deploy-manage/deploy/elastic-cloud/ec-customize-deployment-components.md b/deploy-manage/deploy/elastic-cloud/ec-customize-deployment-components.md
index 74ef2a8102..2293a464c8 100644
--- a/deploy-manage/deploy/elastic-cloud/ec-customize-deployment-components.md
+++ b/deploy-manage/deploy/elastic-cloud/ec-customize-deployment-components.md
@@ -53,7 +53,7 @@ High availability is achieved by running a cluster with replicas in multiple dat

Running in two data centers or availability zones is our default high availability configuration. It provides reasonably high protection against infrastructure failures and intermittent network problems. You might want three data centers if you need even higher fault tolerance. Just one zone might be sufficient, if the cluster is mainly used for testing or development.

::::{important}
-Some [regions](https://www.elastic.co/guide/en/cloud/current/ec-reference-regions.html) might have only two availability zones.
+Some [regions](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/regions.md) might have only two availability zones.
::::

diff --git a/deploy-manage/deploy/elastic-cloud/ech-aws-instance-configuration.md b/deploy-manage/deploy/elastic-cloud/ech-aws-instance-configuration.md
index 558752080e..cf00dca293 100644
--- a/deploy-manage/deploy/elastic-cloud/ech-aws-instance-configuration.md
+++ b/deploy-manage/deploy/elastic-cloud/ech-aws-instance-configuration.md
@@ -7,7 +7,7 @@ mapped_pages:

Amazon EC2 (AWS) C6gd, M6gd & R6gd instances, powered by AWS Graviton2, are now available for Elastic Cloud deployments.
C6gd, M6gd & R6gd VMs use the [Graviton2, ARM neoverse N1 cores](https://aws.amazon.com/about-aws/whats-new/2020/07/announcing-new-amazon-ec2-instances-powered-aws-graviton2-processors/) and provide high compute coupled with fast NVMe storage, which makes them a good fit to power Elastic workloads. In addition, Graviton2 VMs also offer more than a 20% improvement in price-performance over comparable Intel chipsets. -In addition to AWS Graviton2 instances, Amazon EC2 (AWS) C5d, M5d, I3, I3en, and D2/D3 instances are now available for Elastic Cloud deployments in all supported [AWS Cloud Regions](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html#ec-aws_regions). +In addition to AWS Graviton2 instances, Amazon EC2 (AWS) C5d, M5d, I3, I3en, and D2/D3 instances are now available for Elastic Cloud deployments in all supported [AWS Cloud Regions](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md#ec-aws_regions). For specific AWS hardware and availability details, check the [Regional availability of instances per AWS region](ech-default-aws-configurations.md#aws-list-region) and the [AWS default provider instance configurations](ech-default-aws-configurations.md). @@ -25,7 +25,7 @@ For example, Instance ID / SKU: `aws.es.datahot.i3` | `\*.es.datahot.*` | Denotes that this configuration is an Elasticsearch (`es`) cluster component that serves as a data node for hot content. Other options may be `datawarm`, `datacold`, `datafrozen` for data nodes, and `kibana`, `master`, and so on for other components. | | `*.i3` | Denotes that this configuration is running on the AWS i3 instance family. | -The new configuration naming convention aligns with the [data tiers](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html) intended for each configuration type, replacing prior naming conventions of “highio”, “highcpu”, and so on. The following table details the new configurations for data nodes and compares them with prior naming conventions where applicable. +The new configuration naming convention aligns with the [data tiers](/manage-data/lifecycle/data-tiers.md) intended for each configuration type, replacing prior naming conventions of “highio”, “highcpu”, and so on. The following table details the new configurations for data nodes and compares them with prior naming conventions where applicable. | New config name | Notes | | --- | --- | diff --git a/deploy-manage/deploy/elastic-cloud/ech-azure-instance-configuration.md b/deploy-manage/deploy/elastic-cloud/ech-azure-instance-configuration.md index 01602c7b2a..e39341a969 100644 --- a/deploy-manage/deploy/elastic-cloud/ech-azure-instance-configuration.md +++ b/deploy-manage/deploy/elastic-cloud/ech-azure-instance-configuration.md @@ -5,7 +5,7 @@ mapped_pages: # Elasticsearch Add-On for Heroku Azure instance configurations [ech-azure-instance-configuration] -Azure [Ddv4](https://docs.microsoft.com/en-us/azure/virtual-machines/ddv4-ddsv4-series/), [Edsv4](https://docs.microsoft.com/en-us/azure/virtual-machines/edv4-edsv4-series/), [Fsv2](https://docs.microsoft.com/en-us/azure/virtual-machines/fsv2-series/), and [Lsv3](https://docs.microsoft.com/en-us/azure/virtual-machines/lsv3-series/) virtual machines (VM) are now available for Elastic Cloud deployments in all supported [Azure Cloud regions](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html#ec-azure_regions). 
These VMs provide additional combinations of compute, memory, and disk configurations to better fit your use-cases to optimize performance and cost.
+Azure [Ddv4](https://docs.microsoft.com/en-us/azure/virtual-machines/ddv4-ddsv4-series/), [Edsv4](https://docs.microsoft.com/en-us/azure/virtual-machines/edv4-edsv4-series/), [Fsv2](https://docs.microsoft.com/en-us/azure/virtual-machines/fsv2-series/), and [Lsv3](https://docs.microsoft.com/en-us/azure/virtual-machines/lsv3-series/) virtual machines (VMs) are now available for Elastic Cloud deployments in all supported [Azure Cloud regions](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md#ec-azure_regions). These VMs provide additional combinations of compute, memory, and disk configurations to better fit your use cases and optimize performance and cost.

To learn about the Azure specific configurations, check:

@@ -26,7 +26,7 @@ For example, Instance ID / SKU: `azure.es.datahot.ddv4`

| `\*.es.datahot.*` | Denotes that this configuration is an Elasticsearch (`es`) cluster component that serves as a data node for hot content. Other options may be `datawarm`, `datacold`, `datafrozen` for data nodes, and `kibana`, `master`, and so on for other components. |
| `*.ddv4` | Denotes that this configuration is running on the Azure Ddv4 VM series. |

-The new configuration naming convention aligns with the [data tiers](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html) intended for each configuration type, replacing prior naming conventions of “highio”, “highcpu”, and so on. The following table details the new configurations for data nodes and compares them with prior naming conventions where applicable.
+The new configuration naming convention aligns with the [data tiers](/manage-data/lifecycle/data-tiers.md) intended for each configuration type, replacing prior naming conventions of “highio”, “highcpu”, and so on. The following table details the new configurations for data nodes and compares them with prior naming conventions where applicable.

| New config name | Notes |
| --- | --- |

diff --git a/deploy-manage/deploy/elastic-cloud/ech-gcp-instance-configuration.md b/deploy-manage/deploy/elastic-cloud/ech-gcp-instance-configuration.md
index 732721ef58..71d37d5abc 100644
--- a/deploy-manage/deploy/elastic-cloud/ech-gcp-instance-configuration.md
+++ b/deploy-manage/deploy/elastic-cloud/ech-gcp-instance-configuration.md
@@ -5,7 +5,7 @@ mapped_pages:

# Elasticsearch Add-On for Heroku GCP instance configurations [ech-gcp-instance-configuration]

-Google Compute Engine (GCE) N2 general purpose VM types are now available for Elastic Cloud deployments in all supported [Google Cloud regions](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html#ec-gcp_regions). [N2](https://cloud.google.com/compute/docs/machine-types) VMs have a better mix of vCPU, RAM, and internal disk, and are up to 50% more cost effective when compared to N1 VM types.
In addition to N2, we also provide N2D VMs across the Google Cloud regions. To learn about the GCE specific configurations, check: @@ -27,7 +27,7 @@ For example, Instance ID / SKU: `gcp.es.datahot.n2.68x10x45` | `\*.n2.*` | Denotes that this configuration is running on the GCP N2 family. | | `*.68x10x45` | Denotes the resource configuration, delimited by “x”.
* The first argument (`68`) denotes the total gross RAM capacity of the instance. Normally we use 4GB of that for utilities and therefore this configuration has a “usable RAM” of 64GB.
* The second argument (`10`) denotes the number of vCPUs allocated to the entire machine.
* The third argument denotes the ratio of RAM to storage capacity as in 1:X. In this case, for each 1GB of RAM, you will have 45 GB of disk to store Elasticsearch data. | -The new configuration naming convention aligns with the [data tiers](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html) intended for each configuration type, replacing prior naming conventions of “highio”, “highcpu”, and so on. The following table details the new configurations for data nodes and compares them with prior naming conventions where applicable. +The new configuration naming convention aligns with the [data tiers](/manage-data/lifecycle/data-tiers.md) intended for each configuration type, replacing prior naming conventions of “highio”, “highcpu”, and so on. The following table details the new configurations for data nodes and compares them with prior naming conventions where applicable. | New config name | Notes | | --- | --- | diff --git a/deploy-manage/deploy/elastic-cloud/ech-getting-started.md b/deploy-manage/deploy/elastic-cloud/ech-getting-started.md index 0ee816c333..6a00612079 100644 --- a/deploy-manage/deploy/elastic-cloud/ech-getting-started.md +++ b/deploy-manage/deploy/elastic-cloud/ech-getting-started.md @@ -9,7 +9,7 @@ This documentation applies to Heroku users who want to make use of the Elasticse The add-on runs on the Elasticsearch Service and provides access to [Elasticsearch](https://www.elastic.co/products/elasticsearch), the open source, distributed, RESTful search engine. Many other features of the Elastic Stack are also readily available to Heroku users through the [Elasticsearch Add-On for Heroku console](https://cloud.elastic.co?page=docs&placement=docs-body) after you install the add-on. For example, you can use Kibana to visualize your Elasticsearch data. -[Elasticsearch Machine Learning](https://www.elastic.co/guide/en/machine-learning/current/index.html), [Elastic APM](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Elastic Fleet Server](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) are not supported by the Elasticsearch Add-On for Heroku. +[Elasticsearch Machine Learning](/explore-analyze/machine-learning.md), [Elastic APM](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Elastic Fleet Server](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) are not supported by the Elasticsearch Add-On for Heroku. To learn more about what plans are available for Heroku users and their cost, check the [Elasticsearch add-on](https://elements.heroku.com/addons/foundelasticsearch) in the Elements Marketplace. diff --git a/deploy-manage/deploy/elastic-cloud/ech-migrate-data2.md b/deploy-manage/deploy/elastic-cloud/ech-migrate-data2.md index d1900c2d56..713aa72c7d 100644 --- a/deploy-manage/deploy/elastic-cloud/ech-migrate-data2.md +++ b/deploy-manage/deploy/elastic-cloud/ech-migrate-data2.md @@ -25,7 +25,7 @@ Reindex from a remote cluster : The new cluster must be the same size as your old one, or larger, to accommodate the data. Depending on your security settings for your old cluster, you might need to temporarily allow TCP traffic on port 9243 for this procedure. Restore from a snapshot -: The new cluster must be the same size as your old one, or larger, to accommodate the data. 
The new cluster must also be an Elasticsearch version that is compatible with the old cluster (check [Elasticsearch snapshot version compatibility](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html#snapshot-restore-version-compatibility) for details). If you have not already done so, you will need to [set up snapshots for your old cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html) using a repository that can be accessed from the new cluster. +: The new cluster must be the same size as your old one, or larger, to accommodate the data. The new cluster must also be an Elasticsearch version that is compatible with the old cluster (check [Elasticsearch snapshot version compatibility](/deploy-manage/tools/snapshot-and-restore.md#snapshot-restore-version-compatibility) for details). If you have not already done so, you will need to [set up snapshots for your old cluster](/deploy-manage/tools/snapshot-and-restore/self-managed.md) using a repository that can be accessed from the new cluster. Migrating internal Elasticsearch indices : If you are migrating internal Elasticsearch indices from another cluster, specifically the `.kibana` index or the `.security` index, there are two options: @@ -35,7 +35,7 @@ Migrating internal Elasticsearch indices ::::{warning} -Before you migrate your Elasticsearch data, [define your index mappings](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html) on the new cluster. Index mappings are unable to migrate during reindex operations. +Before you migrate your Elasticsearch data, [define your index mappings](/manage-data/data-store/mapping.md) on the new cluster. Index mappings are unable to migrate during reindex operations. :::: diff --git a/deploy-manage/deploy/elastic-cloud/ech-restrictions.md b/deploy-manage/deploy/elastic-cloud/ech-restrictions.md index 480b776d8e..2c54b72e50 100644 --- a/deploy-manage/deploy/elastic-cloud/ech-restrictions.md +++ b/deploy-manage/deploy/elastic-cloud/ech-restrictions.md @@ -70,14 +70,14 @@ Currently you can’t use SSO to login directly from {{ecloud}} into Kibana endp ## Kibana [ech-restrictions-kibana] * The maximum size of a single {{kib}} instance is 8GB. This means, {{kib}} instances can be scaled up to 8GB before they are scaled out. For example, when creating a deployment with a {{kib}} instance of size 16GB, then 2x8GB instances are created. If you face performance issues with {{kib}} PNG or PDF reports, the recommendations are to create multiple, smaller dashboards to export the data, or to use a third party browser extension for exporting the dashboard in the format you need. -* Running an external Kibana in parallel to Elasticsearch Add-On for Heroku’s Kibana instances may cause errors, for example [`Unable to decrypt attribute`](../../../explore-analyze/alerts-cases/alerts/alerting-common-issues.md#rule-cannot-decrypt-api-key), due to a mismatched [`xpack.encryptedSavedObjects.encryptionKey`](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#security-encrypted-saved-objects-settings) as Elasticsearch Add-On for Heroku does not [allow users to set](edit-stack-settings.md) nor expose this value. While workarounds are possible, this is not officially supported nor generally recommended. 
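To make the encryption-key mismatch concrete: any self-managed {{kib}} reading the same saved objects would need an identical key in its `kibana.yml`. A minimal sketch (the key value is a placeholder, not a real key):

```yaml
# kibana.yml: the key must be at least 32 characters and identical across
# every Kibana instance that shares the same saved objects.
xpack.encryptedSavedObjects.encryptionKey: "something-at-least-32-characters-long"
```

Because the hosted service neither exposes this key nor lets you set it, a parallel Kibana cannot be configured to match, hence the decryption errors described here.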
+* Running an external Kibana in parallel to Elasticsearch Add-On for Heroku’s Kibana instances may cause errors, for example [`Unable to decrypt attribute`](../../../explore-analyze/alerts-cases/alerts/alerting-common-issues.md#rule-cannot-decrypt-api-key), due to a mismatched [`xpack.encryptedSavedObjects.encryptionKey`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md#security-encrypted-saved-objects-settings) as Elasticsearch Add-On for Heroku does not [allow users to set](edit-stack-settings.md) or expose this value. While workarounds are possible, this is neither officially supported nor generally recommended.

## APM Agent central configuration with PrivateLink or traffic filters [ech-restrictions-apm-traffic-filters]

If you are using APM 7.9.0 or older:

-* You cannot use [APM Agent central configuration](https://www.elastic.co/guide/en/observability/current/apm-agent-configuration.html) if your deployment is secured by [traffic filters](../../security/traffic-filtering.md).
+* You cannot use [APM Agent central configuration](/solutions/observability/apps/apm-agent-central-configuration.md) if your deployment is secured by [traffic filters](../../security/traffic-filtering.md).
* If you access your APM deployment over [PrivateLink](../../security/aws-privatelink-traffic-filters.md), to use APM Agent central configuration you need to allow access to the APM deployment over public internet.

@@ -116,7 +116,7 @@ There are situations where you may need or want to move your installed {{agents}

In {{ecloud}}, you can migrate your {{agents}} by taking a snapshot of your source deployment, and restoring it on a target deployment.

-To make a seamless migration, after restoring from a snapshot there are some additional steps required, such as updating settings and resetting the agent policy. Check [Migrate Elastic Agents](https://www.elastic.co/guide/en/fleet/current/migrate-elastic-agent.html) for details.
+To make a seamless migration, after restoring from a snapshot there are some additional steps required, such as updating settings and resetting the agent policy. Check [Migrate Elastic Agents](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/migrate-elastic-agent.md) for details.

## Regions and Availability Zones [ech-regions-and-availability-zone]

diff --git a/deploy-manage/deploy/elastic-cloud/ech-version-policy.md b/deploy-manage/deploy/elastic-cloud/ech-version-policy.md
index 2ee4d36e7b..b32e57de2e 100644
--- a/deploy-manage/deploy/elastic-cloud/ech-version-policy.md
+++ b/deploy-manage/deploy/elastic-cloud/ech-version-policy.md
@@ -25,7 +25,7 @@ You might sometimes notice additional versions listed in the user interface beyo

Whenever a new Elastic Stack version is released, we do our best to provide the new version on our hosted service at the same time. We send you an email and add a notice to the console, recommending an upgrade. You’ll need to decide whether to upgrade to the new version with new features and bug fixes or to stay with a version you know works for you a while longer.

-There can be [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes.html) in some new versions of Elasticsearch that break what used to work in older versions. Before upgrading, you’ll want to check if the new version introduces any changes that might affect your applications.
A breaking change might be a function that was previously deprecated and that has been removed in the latest version, for example. If you have an application that depends on the removed function, the application will need to be updated to continue working with the new version of Elasticsearch. +There can be [breaking changes](asciidocalypse://docs/elasticsearch/docs/release-notes/breaking-changes/elasticsearch.md) in some new versions of Elasticsearch that break what used to work in older versions. Before upgrading, you’ll want to check if the new version introduces any changes that might affect your applications. A breaking change might be a function that was previously deprecated and that has been removed in the latest version, for example. If you have an application that depends on the removed function, the application will need to be updated to continue working with the new version of Elasticsearch. To learn more about upgrading to newer versions of the Elastic Stack on our hosted service, check [Upgrade Versions](../../upgrade/deployment-or-cluster.md). diff --git a/deploy-manage/deploy/elastic-cloud/ech-whats-new.md b/deploy-manage/deploy/elastic-cloud/ech-whats-new.md index 1b10635f2e..4a578c21b3 100644 --- a/deploy-manage/deploy/elastic-cloud/ech-whats-new.md +++ b/deploy-manage/deploy/elastic-cloud/ech-whats-new.md @@ -15,14 +15,14 @@ Check the Release Notes to get the recent updates for each product. Elasticsearch -* [Elasticsearch 8.x Release Notes](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-release-notes.html) +* [Elasticsearch 8.x Release Notes](asciidocalypse://docs/elasticsearch/docs/release-notes/elasticsearch.md) * [Elasticsearch 7.x Release Notes](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/es-release-notes.html) * [Elasticsearch 6.x Release Notes](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/es-release-notes.html) * [Elasticsearch 5.x Release Notes](https://www.elastic.co/guide/en/elasticsearch/reference/5.6/es-release-notes.html) Kibana -* [Kibana 8.x Release Notes](https://www.elastic.co/guide/en/kibana/current/release-notes.html) +* [Kibana 8.x Release Notes](asciidocalypse://docs/kibana/docs/release-notes/kibana.md) * [Kibana 7.x Release Notes](https://www.elastic.co/guide/en/kibana/7.17/release-notes.html) * [Kibana 6.x Release Notes](https://www.elastic.co/guide/en/kibana/6.8/release-notes.html) * [Kibana 5.x Release Notes](https://www.elastic.co/guide/en/kibana/5.6/release-notes.html) diff --git a/deploy-manage/deploy/elastic-cloud/find-cloud-id.md b/deploy-manage/deploy/elastic-cloud/find-cloud-id.md index 1dceab74e5..f3ecd0f932 100644 --- a/deploy-manage/deploy/elastic-cloud/find-cloud-id.md +++ b/deploy-manage/deploy/elastic-cloud/find-cloud-id.md @@ -39,7 +39,7 @@ To use the Cloud ID, you need: * The unique Cloud ID for your deployment, available from the deployment overview page. * A user ID and password that has permission to send data to your cluster. - In our examples, we use the `elastic` superuser that every Elasticsearch cluster comes with. The password for the `elastic` user is provided when you create a deployment (and can also be [reset](../../users-roles/cluster-or-deployment-auth/built-in-users.md) if you forget it). On a production system, you should adapt these examples by creating a user that can write to and access only the minimally required indices. 
For each Beat, review the specific feature and role table, similar to the one in [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/feature-roles.html) documentation. + In our examples, we use the `elastic` superuser that every Elasticsearch cluster comes with. The password for the `elastic` user is provided when you create a deployment (and can also be [reset](../../users-roles/cluster-or-deployment-auth/built-in-users.md) if you forget it). On a production system, you should adapt these examples by creating a user that can write to and access only the minimally required indices. For each Beat, review the specific feature and role table, similar to the one in [Metricbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/feature-roles.md) documentation. @@ -48,7 +48,7 @@ To use the Cloud ID, you need: The following example shows how you can send operational data from Metricbeat to Elasticsearch Service by using the Cloud ID. Any of the available Beats will work, but we had to pick one for this example. ::::{tip} -For others, you can learn more about [getting started](https://www.elastic.co/guide/en/beats/libbeat/current/getting-started.html) with each Beat. +For others, you can learn more about [getting started](asciidocalypse://docs/beats/docs/reference/ingestion-tools/index.md) with each Beat. :::: @@ -57,8 +57,8 @@ To get started with Metricbeat and Elasticsearch Service: 1. Log in to the [Elasticsearch Service Console](https://cloud.elastic.co?page=docs&placement=docs-body). 2. [Create a new deployment](create-an-elastic-cloud-hosted-deployment.md) and copy down the password for the `elastic` user. 3. On the deployment overview page, copy down the Cloud ID. -4. Set up the Beat of your choice, such as [Metricbeat version 7.17](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-installation-configuration.html). -5. [Configure the Beat output to send to Elastic Cloud](https://www.elastic.co/guide/en/beats/metricbeat/current/configure-cloud-id.html). +4. Set up the Beat of your choice, such as [Metricbeat version 7.17](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-installation-configuration.md). +5. [Configure the Beat output to send to Elastic Cloud](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configure-cloud-id.md). ::::{note} Make sure you replace the values for `cloud.id` and `cloud.auth` with your own information. diff --git a/deploy-manage/deploy/elastic-cloud/manage-deployments-using-elastic-cloud-api.md b/deploy-manage/deploy/elastic-cloud/manage-deployments-using-elastic-cloud-api.md index 1de40f136f..56c80f6de8 100644 --- a/deploy-manage/deploy/elastic-cloud/manage-deployments-using-elastic-cloud-api.md +++ b/deploy-manage/deploy/elastic-cloud/manage-deployments-using-elastic-cloud-api.md @@ -40,7 +40,7 @@ When you create a new deployment through the API, you have two options: ### Create a deployment using default values [ec-api-examples-deployment-simple] -This example requires minimal information in the API payload, and creates a deployment with default settings and a default name. You just need to specify one of the [available deployment templates](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html) in your API request header and the deployment is created using default settings from that template. 
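To make the Cloud ID steps above concrete, the ID and credentials live in `metricbeat.yml`. A minimal sketch with placeholder values:

```yaml
# metricbeat.yml: placeholders, not real credentials.
# cloud.id comes from the deployment overview page; cloud.auth is user:password.
cloud.id: "My_deployment:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ..."
cloud.auth: "elastic:YOUR_PASSWORD"
```

With `cloud.id` set, Metricbeat derives the {{es}} endpoint on its own, so no explicit `output.elasticsearch.hosts` entry is required.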
+This example requires minimal information in the API payload, and creates a deployment with default settings and a default name. You just need to specify one of the [available deployment templates](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md) in your API request header and the deployment is created using default settings from that template. ```sh curl -XPOST \ diff --git a/deploy-manage/deploy/elastic-cloud/manage-integrations-server.md b/deploy-manage/deploy/elastic-cloud/manage-integrations-server.md index 7fde6a5748..891cb11a8f 100644 --- a/deploy-manage/deploy/elastic-cloud/manage-integrations-server.md +++ b/deploy-manage/deploy/elastic-cloud/manage-integrations-server.md @@ -5,7 +5,7 @@ mapped_pages: # Manage your Integrations server [ec-manage-integrations-server] -For deployments that are version 8.0 and later, you have the option to add a combined [Application Performance Monitoring (APM) Server](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Fleet Server](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) to your deployment. APM allows you to monitor software services and applications in real time, turning that data into documents stored in the Elasticsearch cluster. Fleet allows you to centrally manage Elastic Agents on many hosts. +For deployments that are version 8.0 and later, you have the option to add a combined [Application Performance Monitoring (APM) Server](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Fleet Server](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) to your deployment. APM allows you to monitor software services and applications in real time, turning that data into documents stored in the Elasticsearch cluster. Fleet allows you to centrally manage Elastic Agents on many hosts. As part of provisioning, the APM Server and Fleet Server are already configured to work with Elasticsearch and Kibana. At the end of provisioning, you are shown the secret token to configure communication between the APM Server and the backend [APM Agents](https://www.elastic.co/guide/en/apm/agent/index.html). The APM Agents get deployed within your services and applications. diff --git a/deploy-manage/deploy/elastic-cloud/manage-plugins-extensions-through-api.md b/deploy-manage/deploy/elastic-cloud/manage-plugins-extensions-through-api.md index 53b3e72a4f..25d73c1022 100644 --- a/deploy-manage/deploy/elastic-cloud/manage-plugins-extensions-through-api.md +++ b/deploy-manage/deploy/elastic-cloud/manage-plugins-extensions-through-api.md @@ -33,7 +33,7 @@ For plugins larger than 200MB the download URL option **must** be used. Plugins These two examples are for the `plugin` extension type. For bundles, change `extension_type` to `bundle`. -For plugins, `version` must match (exactly) the `elasticsearch.version` field defined in the plugin’s `plugin-descriptor.properties` file. Check [Help for plugin authors](https://www.elastic.co/guide/en/elasticsearch/plugins/current/plugin-authors.html#plugin-authors) for details. For plugins larger than 5GB, the `plugin-descriptor.properties` file needs to be at the top of the archive. This ensures that the our verification process is able to detect that it is an Elasticsearch plugin; otherwise the plugin will be rejected by the API. This order can be achieved by specifying at time of creating the ZIP file: `zip -r name-of-plugin.zip plugin-descriptor.properties *`. 
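If in doubt about the archive ordering requirement described above, you can list the archive entries after zipping and confirm that the descriptor comes first. A sketch using standard Info-ZIP tools (the plugin file name is a placeholder):

```sh
# Put the descriptor first, then add everything else.
zip -r name-of-plugin.zip plugin-descriptor.properties *
# List entries in archive order; plugin-descriptor.properties should appear first.
unzip -l name-of-plugin.zip | head
```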
+For plugins, `version` must match (exactly) the `elasticsearch.version` field defined in the plugin’s `plugin-descriptor.properties` file. Check [Help for plugin authors](asciidocalypse://docs/elasticsearch/docs/extend/create-elasticsearch-plugins/index.md#plugin-authors) for details. For plugins larger than 5GB, the `plugin-descriptor.properties` file needs to be at the top of the archive. This ensures that our verification process is able to detect that it is an Elasticsearch plugin; otherwise the plugin will be rejected by the API. This order can be achieved by listing `plugin-descriptor.properties` first when creating the ZIP file: `zip -r name-of-plugin.zip plugin-descriptor.properties *`.

For bundles, we recommend setting `version` using wildcard notation that matches the major version of the Elasticsearch deployment. For example, if Elasticsearch is on version 8.4.3, simply set `8.*` as the version. The value `8.*` means that the bundle is compatible with all 8.x versions of Elasticsearch.

diff --git a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md
index 0547b1e9a9..8e6dc28ca5 100644
--- a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md
+++ b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md
@@ -44,7 +44,7 @@ Elasticsearch Service API

$$$ec-restrictions-apis-elasticsearch$$$

Elasticsearch APIs
-: The Elasticsearch APIs do not natively enforce rate limiting. However, all requests to the Elasticsearch cluster are subject to Elasticsearch configuration settings, such as the [network HTTP setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#http-settings) `http:max_content_length` which restricts the maximum size of an HTTP request body. This setting has a default value of 100MB, hence restricting API request payloads to that size. This setting is not currently configurable in Elasticsearch Service. For a list of which Elasticsearch settings are supported on Cloud, check [Add Elasticsearch user settings](edit-stack-settings.md). To learn about using the Elasticsearch APIs in Elasticsearch Service, check [Access the Elasticsearch API console](https://www.elastic.co/guide/en/cloud/current/ec-api-console.html). And, for full details about the Elasticsearch APIs and their endpoints, check the [Elasticsearch API reference documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-apis.html).
+: The Elasticsearch APIs do not natively enforce rate limiting. However, all requests to the Elasticsearch cluster are subject to Elasticsearch configuration settings, such as the [network HTTP setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings) `http.max_content_length`, which restricts the maximum size of an HTTP request body. This setting has a default value of 100MB, hence restricting API request payloads to that size. This setting is not currently configurable in Elasticsearch Service. For a list of which Elasticsearch settings are supported on Cloud, check [Add Elasticsearch user settings](edit-stack-settings.md). To learn about using the Elasticsearch APIs in Elasticsearch Service, check [Access the Elasticsearch API console](https://www.elastic.co/guide/en/cloud/current/ec-api-console.html).
And, for full details about the Elasticsearch APIs and their endpoints, check the [Elasticsearch API reference documentation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/index.md). $$$ec-restrictions-apis-kibana$$$ @@ -93,14 +93,14 @@ Currently you can’t use SSO to login directly from {{ecloud}} into Kibana endp ## Kibana [ec-restrictions-kibana] * The maximum size of a single {{kib}} instance is 8GB. This means, {{kib}} instances can be scaled up to 8GB before they are scaled out. For example, when creating a deployment with a {{kib}} instance of size 16GB, then 2x8GB instances are created. If you face performance issues with {{kib}} PNG or PDF reports, the recommendations are to create multiple, smaller dashboards to export the data, or to use a third party browser extension for exporting the dashboard in the format you need. -* Running an external Kibana in parallel to Elasticsearch Service’s Kibana instances may cause errors, for example [`Unable to decrypt attribute`](../../../explore-analyze/alerts-cases/alerts/alerting-common-issues.md#rule-cannot-decrypt-api-key), due to a mismatched [`xpack.encryptedSavedObjects.encryptionKey`](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#security-encrypted-saved-objects-settings) as Elasticsearch Service does not [allow users to set](edit-stack-settings.md) nor expose this value. While workarounds are possible, this is not officially supported nor generally recommended. +* Running an external Kibana in parallel to Elasticsearch Service’s Kibana instances may cause errors, for example [`Unable to decrypt attribute`](../../../explore-analyze/alerts-cases/alerts/alerting-common-issues.md#rule-cannot-decrypt-api-key), due to a mismatched [`xpack.encryptedSavedObjects.encryptionKey`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md#security-encrypted-saved-objects-settings) as Elasticsearch Service does not [allow users to set](edit-stack-settings.md) nor expose this value. While workarounds are possible, this is not officially supported nor generally recommended. ## APM Agent central configuration with PrivateLink or traffic filters [ec-restrictions-apm-traffic-filters] If you are using APM 7.9.0 or older: -* You cannot use [APM Agent central configuration](https://www.elastic.co/guide/en/observability/current/apm-agent-configuration.html) if your deployment is secured by [traffic filters](../../security/traffic-filtering.md). +* You cannot use [APM Agent central configuration](/solutions/observability/apps/apm-agent-central-configuration.md) if your deployment is secured by [traffic filters](../../security/traffic-filtering.md). * If you access your APM deployment over [PrivateLink](../../security/aws-privatelink-traffic-filters.md), to use APM Agent central configuration you need to allow access to the APM deployment over public internet. @@ -128,7 +128,7 @@ There are situations where you may need or want to move your installed {{agents} In {{ecloud}}, you can migrate your {{agents}} by taking a snapshot of your source deployment, and restoring it on a target deployment. -To make a seamless migration, after restoring from a snapshot there are some additional steps required, such as updating settings and resetting the agent policy. Check [Migrate Elastic Agents](https://www.elastic.co/guide/en/fleet/current/migrate-elastic-agent.html) for details. 
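As a rough sketch of that snapshot-and-restore flow (the deployment URL and snapshot name are placeholders, `found-snapshots` assumes the default {{ecloud}} repository name, and the linked guide remains authoritative):

```sh
# Restore all indices from the source deployment's snapshot into the target.
# Target indices must not already exist (or must be closed) for the restore to succeed.
curl -X POST "https://TARGET_DEPLOYMENT_URL:9243/_snapshot/found-snapshots/SNAPSHOT_NAME/_restore" \
  -u elastic:PASSWORD \
  -H "Content-Type: application/json" \
  -d '{ "indices": "*" }'
```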
+To make a seamless migration, after restoring from a snapshot there are some additional steps required, such as updating settings and resetting the agent policy. Check [Migrate Elastic Agents](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/migrate-elastic-agent.md) for details. ## Regions and Availability Zones [ec-regions-and-availability-zone] diff --git a/deploy-manage/deploy/elastic-cloud/switch-from-apm-to-integrations-server-payload.md b/deploy-manage/deploy/elastic-cloud/switch-from-apm-to-integrations-server-payload.md index 04ff45f431..698fccd79c 100644 --- a/deploy-manage/deploy/elastic-cloud/switch-from-apm-to-integrations-server-payload.md +++ b/deploy-manage/deploy/elastic-cloud/switch-from-apm-to-integrations-server-payload.md @@ -376,7 +376,7 @@ Beginning with Elastic Stack version 8.0, [Integrations Server](manage-integrati :::: -You have the option to add a combined [Application Performance Monitoring (APM) Server](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Fleet Server](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) to your deployment. APM allows you to monitor software services and applications in real time, turning that data into documents stored in the Elasticsearch cluster. Fleet allows you to centrally manage Elastic Agents on many hosts. +You have the option to add a combined [Application Performance Monitoring (APM) Server](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Fleet Server](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) to your deployment. APM allows you to monitor software services and applications in real time, turning that data into documents stored in the Elasticsearch cluster. Fleet allows you to centrally manage Elastic Agents on many hosts. As part of provisioning, the APM Server and Fleet Server are already configured to work with Elasticsearch and Kibana. At the end of provisioning, you are shown the secret token to configure communication between the APM Server and the backend [APM Agents](https://www.elastic.co/guide/en/apm/agent/index.html). The APM Agents get deployed within your services and applications. diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md b/deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md index 17265c22b0..36abba2ff9 100644 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md +++ b/deploy-manage/deploy/self-managed/bootstrap-checks-heap-size.md @@ -5,5 +5,5 @@ mapped_pages: # Heap size check [bootstrap-checks-heap-size] -By default, {{es}} automatically sizes JVM heap based on a node’s [roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) and total memory. If you manually override the default sizing and start the JVM with different initial and max heap sizes, the JVM may pause as it resizes the heap during system usage. If you enable [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock), the JVM locks the initial heap size on startup. If the initial heap size is not equal to the maximum heap size, some JVM heap may not be locked after a resize. To avoid these issues, start the JVM with an initial heap size equal to the maximum heap size. +By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. 
If you manually override the default sizing and start the JVM with different initial and max heap sizes, the JVM may pause as it resizes the heap during system usage. If you enable [`bootstrap.memory_lock`](setup-configuration-memory.md#bootstrap-memory_lock), the JVM locks the initial heap size on startup. If the initial heap size is not equal to the maximum heap size, some JVM heap may not be locked after a resize. To avoid these issues, start the JVM with an initial heap size equal to the maximum heap size. diff --git a/deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md b/deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md index d7a3af443c..b6815048ae 100644 --- a/deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md +++ b/deploy-manage/deploy/self-managed/bootstrap-checks-max-map-count.md @@ -7,5 +7,5 @@ mapped_pages: Continuing from the previous [point](max-size-virtual-memory-check.md), to use `mmap` effectively, Elasticsearch also requires the ability to create many memory-mapped areas. The maximum map count check checks that the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only. To pass the maximum map count check, you must configure `vm.max_map_count` via `sysctl` to be at least `262144`. -Alternatively, the maximum map count check is only needed if you are using `mmapfs` or `hybridfs` as the [store type](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html) for your indices. If you [do not allow](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html#allow-mmap) the use of `mmap` then this bootstrap check will not be enforced. +Alternatively, the maximum map count check is only needed if you are using `mmapfs` or `hybridfs` as the [store type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-store-settings.md) for your indices. If you [do not allow](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html#allow-mmap) the use of `mmap` then this bootstrap check will not be enforced. diff --git a/deploy-manage/deploy/self-managed/configure.md b/deploy-manage/deploy/self-managed/configure.md index 54ef1b25ed..565f4629d5 100644 --- a/deploy-manage/deploy/self-managed/configure.md +++ b/deploy-manage/deploy/self-managed/configure.md @@ -185,7 +185,7 @@ $$$elasticsearch-service-account-token$$$ `elasticsearch.serviceAccountToken` : Maximum number of documents loaded by each shard to generate autocomplete suggestions. This value must be a whole number greater than zero. **Default: `"100000"`** ::::{note} - To reload the [logging settings](https://www.elastic.co/guide/en/kibana/current/logging-settings.html), send a SIGHUP signal to {{kib}}. For more logging configuration options, see the [Configure Logging in {{kib}}](../../monitor/logging-configuration/kibana-logging.md) guide. + To reload the [logging settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/logging-settings.md), send a SIGHUP signal to {{kib}}. For more logging configuration options, see the [Configure Logging in {{kib}}](../../monitor/logging-configuration/kibana-logging.md) guide. :::: @@ -262,7 +262,7 @@ $$$tilemap-url$$$ `map.tilemap.url` ![logo cloud](https://doc-icons.s3.us-east-2 : [preview] Indicates which roles to configure the {{kib}} process with, which will effectively run {{kib}} in different modes. 
Valid options are `background_tasks` and `ui`, or `*` to select all roles. **Default: `*`** `notifications.connectors.default.email` -: Choose the default email connector for user notifications. As of `8.6.0`, {{kib}} is shipping with a new notification mechanism that will send email notifications for various user actions, e.g. assigning a *Case* to a user. To enable notifications, an email connector must be [preconfigured](https://www.elastic.co/guide/en/kibana/current/pre-configured-connectors.html) in the system via `kibana.yml`, and the notifications plugin must be configured to point to the ID of that connector. +: Choose the default email connector for user notifications. As of `8.6.0`, {{kib}} is shipping with a new notification mechanism that will send email notifications for various user actions, e.g. assigning a *Case* to a user. To enable notifications, an email connector must be [preconfigured](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/pre-configured-connectors.md) in the system via `kibana.yml`, and the notifications plugin must be configured to point to the ID of that connector. $$$path-data$$$ `path.data` : The path where {{kib}} stores persistent data not saved in {{es}}. **Default: `data`** @@ -472,7 +472,7 @@ $$$settings-xsrf-disableProtection$$$ `server.xsrf.disableProtection` : If authentication is enabled, setting this to `true` enables unauthenticated users to access the {{kib}} server status API and status page. **Default: `false`** $$$telemetry-allowChangingOptInStatus$$$ `telemetry.allowChangingOptInStatus` -: When `false`, users cannot change the opt-in status through [Advanced Settings](https://www.elastic.co/guide/en/kibana/current/advanced-options.html), and {{kib}} only looks at the value of [`telemetry.optIn`](#settings-telemetry-optIn) to determine whether to send telemetry data or not. **Default: `true`**. +: When `false`, users cannot change the opt-in status through [Advanced Settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md), and {{kib}} only looks at the value of [`telemetry.optIn`](#settings-telemetry-optIn) to determine whether to send telemetry data or not. **Default: `true`**. $$$settings-telemetry-optIn$$$ `telemetry.optIn` : Set to `false` to stop sending any telemetry data to Elastic. Reporting your cluster statistics helps us improve your user experience. When `false`, the telemetry data is never sent to Elastic.
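For the notification mechanism above, the preconfigured connector and the default-connector setting sit side by side in `kibana.yml`. A minimal sketch with placeholder SMTP details:

```yaml
xpack.actions.preconfigured:
  my-email-connector:            # placeholder connector ID
    name: Preconfigured email for notifications
    actionTypeId: .email
    config:
      service: other
      from: kibana-alerts@example.com
      host: smtp.example.com
      port: 25
notifications:
  connectors:
    default:
      email: my-email-connector  # must match the preconfigured connector ID above
```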
diff --git a/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md b/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md index cd4f8f7e5d..40847b987b 100644 --- a/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md +++ b/deploy-manage/deploy/self-managed/executable-jna-tmpdir.md @@ -30,7 +30,7 @@ To resolve these problems, either remove the `noexec` option from your `/tmp` fi ``` -If you need finer control over the location of these temporary files, you can also configure the path that JNA uses with the [JVM flag](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-options) `-Djna.tmpdir=` and you can configure the path that `libffi` uses for its temporary files by setting the `LIBFFI_TMPDIR` environment variable. Future versions of {{es}} may need additional configuration, so you should prefer to set `ES_TMPDIR` wherever possible. +If you need finer control over the location of these temporary files, you can also configure the path that JNA uses with the [JVM flag](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options) `-Djna.tmpdir=` and you can configure the path that `libffi` uses for its temporary files by setting the `LIBFFI_TMPDIR` environment variable. Future versions of {{es}} may need additional configuration, so you should prefer to set `ES_TMPDIR` wherever possible. ::::{note} {{es}} does not remove its temporary directory. You should remove leftover temporary directories while {{es}} is not running. It is best to do this automatically, for instance on each reboot. If you are running on Linux, you can achieve this by using the [tmpfs](https://www.kernel.org/doc/html/latest/filesystems/tmpfs.md) file system. diff --git a/deploy-manage/deploy/self-managed/important-settings-configuration.md b/deploy-manage/deploy/self-managed/important-settings-configuration.md index 8f824593cd..ba1e1795b3 100644 --- a/deploy-manage/deploy/self-managed/important-settings-configuration.md +++ b/deploy-manage/deploy/self-managed/important-settings-configuration.md @@ -8,13 +8,13 @@ mapped_pages: {{es}} requires very little configuration to get started, but there are a number of items which **must** be considered before using your cluster in production: * [Path settings](#path-settings) -* [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/misc-cluster-settings.html#cluster-name) +* [Cluster name setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-name) * [Node name setting](#node-name) * [Network host settings](#network.host) * [Discovery settings](#discovery-settings) * [Heap size settings](#heap-size-settings) * [JVM heap dump path setting](#heap-dump-path) -* [GC logging settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#gc-logging) +* [GC logging settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#gc-logging) * [Temporary directory settings](#es-tmpdir) * [JVM fatal error log setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#error-file-path) * [Cluster backups](#important-settings-backups) @@ -60,7 +60,7 @@ Don’t modify anything within the data directory or run processes that might in :::: -Elasticsearch offers a deprecated setting that allows you to specify multiple paths in `path.data`. 
To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](https://www.elastic.co/guide/en/elasticsearch/reference/current/path-settings-overview.html#multiple-data-paths). +Elasticsearch offers a deprecated setting that allows you to specify multiple paths in `path.data`. To learn about this setting, and how to migrate away from it, refer to [Multiple data paths](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/path-settings.md#multiple-data-paths). ## Cluster name setting [_cluster_name_setting] @@ -93,7 +93,7 @@ node.name: prod-data-2 ## Network host setting [network.host] -By default, {{es}} only binds to loopback addresses such as `127.0.0.1` and `[::1]`. This is sufficient to run a cluster of one or more nodes on a single server for development and testing, but a [resilient production cluster](../../production-guidance/availability-and-resilience.md) must involve nodes on other servers. There are many [network settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html) but usually all you need to configure is `network.host`: +By default, {{es}} only binds to loopback addresses such as `127.0.0.1` and `[::1]`. This is sufficient to run a cluster of one or more nodes on a single server for development and testing, but a [resilient production cluster](../../production-guidance/availability-and-resilience.md) must involve nodes on other servers. There are many [network settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md) but usually all you need to configure is `network.host`: ```yaml network.host: 192.168.1.10 @@ -158,12 +158,12 @@ cluster.initial_master_nodes: <1> 1. Identify the initial master nodes by their [`node.name`](#node-name), which defaults to their hostname. Ensure that the value in `cluster.initial_master_nodes` matches the `node.name` exactly. If you use a fully-qualified domain name (FQDN) such as `master-node-a.example.com` for your node names, then you must use the FQDN in this list. Conversely, if `node.name` is a bare hostname without any trailing qualifiers, you must also omit the trailing qualifiers in `cluster.initial_master_nodes`. -See [bootstrapping a cluster](../../distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md) and [discovery and cluster formation settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-settings.html). +See [bootstrapping a cluster](../../distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md) and [discovery and cluster formation settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md). ## Heap size settings [heap-size-settings] -By default, {{es}} automatically sets the JVM heap size based on a node’s [roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) and total memory. We recommend the default sizing for most production environments. +By default, {{es}} automatically sets the JVM heap size based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. We recommend the default sizing for most production environments. 
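If you do need to override the defaults, which the next sentence links to in detail, a minimal sketch for an archive install (the file name is arbitrary; the 4g value is only an example):

```sh
# Create a custom JVM options file; Elasticsearch picks up any file in
# config/jvm.options.d. Equal -Xms and -Xmx avoid heap resizing pauses.
cat > config/jvm.options.d/heap.options <<'EOF'
-Xms4g
-Xmx4g
EOF
```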
If needed, you can override the default sizing by manually [setting the JVM heap size](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-heap-size). diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md index d239d7fb98..635f919a14 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-from-archive-on-linux-macos.md @@ -7,7 +7,7 @@ mapped_pages: {{es}} is available as a `.tar.gz` archive for Linux and MacOS. -This package contains both free and subscription features. [Start a 30-day trial](https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html) to try out all of the features. +This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. The latest stable version of {{es}} can be found on the [Download {{es}}](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). @@ -130,11 +130,11 @@ When {{es}} starts for the first time, the security auto-configuration process b Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. -Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) for more information. +Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) for more information. To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. -1. 
In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool to generate an enrollment token for your new nodes. +1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. ```sh bin/elasticsearch-create-enrollment-token -s node @@ -307,7 +307,7 @@ When you install {{es}}, the following certificates and keys are generated in th `transport.p12` : Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-keystore.html) tool. +`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. Use the following command to retrieve the password for `http.p12`: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md index 70e5c548b9..dab8ea2e55 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-debian-package.md @@ -7,7 +7,7 @@ mapped_pages: The Debian package for Elasticsearch can be [downloaded from our website](#install-deb) or from our [APT repository](#deb-repo). It can be used to install Elasticsearch on any Debian-based system such as Debian and Ubuntu. -This package contains both free and subscription features. [Start a 30-day trial](https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html) to try out all of the features. +This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. The latest stable version of Elasticsearch can be found on the [Download Elasticsearch](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases). @@ -78,7 +78,7 @@ When installing {{es}}, security features are enabled and configured by default. * Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser. * Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates. -The password and certificate and keys are output to your terminal. 
You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) command. +The password and certificate and keys are output to your terminal. You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) command. We recommend storing the `elastic` password as an environment variable in your shell. For example: @@ -340,7 +340,7 @@ When you install {{es}}, the following certificates and keys are generated in th `transport.p12` : Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-keystore.html) tool. +`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. Use the following command to retrieve the password for `http.p12`: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 055030956a..9c173ef7a8 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -7,7 +7,7 @@ mapped_pages: Docker images for {{es}} are available from the Elastic Docker registry. A list of all published Docker images and tags is available at [www.docker.elastic.co](https://www.docker.elastic.co). The source code is in [GitHub](https://github.com/elastic/elasticsearch/blob/master/distribution/docker). -This package contains both free and subscription features. [Start a 30-day trial](https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html) to try out all of the features. +This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. ::::{tip} If you just want to test {{es}} in local development, refer to [Run {{es}} locally](../../../solutions/search/get-started.md). Please note that this setup is not suitable for production environments. @@ -452,9 +452,9 @@ The image [exposes](https://docs.docker.com/engine/reference/builder/#/expose) T ### Manually set the heap size [docker-set-heap-size] -By default, {{es}} automatically sizes JVM heap based on a nodes’s [roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) and the total memory available to the node’s container. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting JVM heap size. 
+By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and the total memory available to the node’s container. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting JVM heap size.

-To manually set the heap size in production, bind mount a [JVM options](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-options) file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired [heap size](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-heap-size) settings.
+To manually set the heap size in production, bind mount a [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options) file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired [heap size](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-heap-size) settings.

For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` environment variable. For example, to use 1GB, use the following command.

@@ -595,7 +595,7 @@ Some plugins require additional security permissions. You must explicitly accept

* Attaching a `tty` when you run the Docker image and allowing the permissions when prompted.
* Inspecting the security permissions and accepting them (if appropriate) by adding the `--batch` flag to the plugin install command.

-See [Plugin management](https://www.elastic.co/guide/en/elasticsearch/plugins/current/_other_command_line_parameters.html) for more information.
+See [Plugin management](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/_other_command_line_parameters.md) for more information.

### Troubleshoot Docker errors for {{es}} [troubleshoot-docker-errors]

diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md
index 7c76679121..47010a4254 100644
--- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md
+++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md
@@ -12,7 +12,7 @@ RPM install is not supported on distributions with old versions of RPM, such as

::::

-This package contains both free and subscription features. [Start a 30-day trial](https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html) to try out all of the features.
+This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features.

The latest stable version of Elasticsearch can be found on the [Download Elasticsearch](https://elastic.co/downloads/elasticsearch) page. Other versions can be found on the [Past Releases page](https://elastic.co/downloads/past-releases).

@@ -82,7 +82,7 @@ When installing {{es}}, security features are enabled and configured by default.

* Authentication and authorization are enabled, and a password is generated for the `elastic` built-in superuser.
* Certificates and keys for TLS are generated for the transport and HTTP layer, and TLS is enabled and configured with these keys and certificates.
-The password and certificate and keys are output to your terminal. You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) command. +The password and certificate and keys are output to your terminal. You can reset the password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) command. We recommend storing the `elastic` password as an environment variable in your shell. For example: @@ -344,7 +344,7 @@ When you install {{es}}, the following certificates and keys are generated in th `transport.p12` : Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster. -`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-keystore.html) tool. +`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](../../security/secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool. Use the following command to retrieve the password for `http.p12`: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 066b87baaa..29d15d3f08 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -7,7 +7,7 @@ mapped_pages: {{es}} can be installed on Windows using the Windows `.zip` archive. This comes with an `elasticsearch-service.bat` command which will set up {{es}} to run as a service. -This package contains both free and subscription features. [Start a 30-day trial](https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html) to try out all of the features. +This package contains both free and subscription features. [Start a 30-day trial](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/license-settings.md) to try out all of the features. ::::{note} On Windows the {{es}} {{ml}} feature requires the Microsoft Universal C Runtime library. This is built into Windows 10, Windows Server 2016 and more recent versions of Windows. For older versions of Windows it can be installed via Windows Update, or from a [separate download](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows). If you cannot install the Microsoft Universal C Runtime library you can still use the rest of {{es}} if you disable the {{ml}} feature. @@ -87,11 +87,11 @@ When {{es}} starts for the first time, the security auto-configuration process b Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters.
During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. -Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) for more information. +Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) for more information. To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool to generate an enrollment token for your new nodes. +1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. ```sh bin\elasticsearch-create-enrollment-token -s node @@ -194,7 +194,7 @@ You can install {{es}} as a service that runs in the background or starts automa TLS is not enabled or configured when you start {{es}} as a service. :::: -3. Generate a password for the `elastic` user with the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) tool. The password is output to the command line. +3. Generate a password for the `elastic` user with the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. The password is output to the command line. ```sh C:\Program Files\elasticsearch-9.0.0-beta1\bin>elasticsearch-reset-password -u elastic @@ -284,9 +284,9 @@ At its core, `elasticsearch-service.bat` relies on [Apache Commons Daemon](https ::::{note} -By default, {{es}} automatically sizes JVM heap based on a node’s [roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) and total memory. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting the heap size.
+By default, {{es}} automatically sizes JVM heap based on a node’s [roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) and total memory. We recommend this default sizing for most production environments. If needed, you can override default sizing by manually setting the heap size. -When installing {{es}} on Windows as a service for the first time or running {{es}} from the command line, you can manually [Set the JVM heap size](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-heap-size). To resize the heap for an already installed service, use the service manager: `bin\elasticsearch-service.bat manager`. +When installing {{es}} on Windows as a service for the first time or running {{es}} from the command line, you can manually [Set the JVM heap size](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-heap-size). To resize the heap for an already installed service, use the service manager: `bin\elasticsearch-service.bat manager`. :::: diff --git a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md index a05a8bc0ac..dc8991e991 100644 --- a/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md +++ b/deploy-manage/deploy/self-managed/install-from-archive-on-linux-macos.md @@ -77,7 +77,7 @@ If this is the first time you’re starting {{kib}}, this command generates a un 3. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. ::::{note} -If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool. These tools are available in the {{es}} `bin` directory. +If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. :::: diff --git a/deploy-manage/deploy/self-managed/install-on-windows.md b/deploy-manage/deploy/self-managed/install-on-windows.md index baf9063552..cd2e52e71f 100644 --- a/deploy-manage/deploy/self-managed/install-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-on-windows.md @@ -50,7 +50,7 @@ If this is the first time you’re starting {{kib}}, this command generates a un 3. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. ::::{note} -If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) tool. 
To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool. These tools are available in the {{es}} `bin` directory. +If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. :::: diff --git a/deploy-manage/deploy/self-managed/install-with-debian-package.md b/deploy-manage/deploy/self-managed/install-with-debian-package.md index c594547e39..0316337fa7 100644 --- a/deploy-manage/deploy/self-managed/install-with-debian-package.md +++ b/deploy-manage/deploy/self-managed/install-with-debian-package.md @@ -49,7 +49,7 @@ When you start {{es}} for the first time, the following security configuration o The password and certificate and keys are output to your terminal. -You can then generate an enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool: +You can then generate an enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool: ```sh bin/elasticsearch-create-enrollment-token -s kibana diff --git a/deploy-manage/deploy/self-managed/install-with-rpm.md b/deploy-manage/deploy/self-managed/install-with-rpm.md index 277d944120..ce62ded7e8 100644 --- a/deploy-manage/deploy/self-managed/install-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-with-rpm.md @@ -59,7 +59,7 @@ When you start {{es}} for the first time, the following security configuration o The password and certificate and keys are output to your terminal. -You can then generate an enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool: +You can then generate an enrollment token for {{kib}} with the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool: ```sh bin/elasticsearch-create-enrollment-token -s kibana diff --git a/deploy-manage/deploy/self-managed/max-number-threads-check.md b/deploy-manage/deploy/self-managed/max-number-threads-check.md index 13f84ff376..1212518221 100644 --- a/deploy-manage/deploy/self-managed/max-number-threads-check.md +++ b/deploy-manage/deploy/self-managed/max-number-threads-check.md @@ -5,5 +5,5 @@ mapped_pages: # Maximum number of threads check [max-number-threads-check] -Elasticsearch executes requests by breaking the request down into stages and handing those stages off to different thread pool executors. There are different [thread pool executors](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-threadpool.html) for a variety of tasks within Elasticsearch. Thus, Elasticsearch needs the ability to create a lot of threads. 
The maximum number of threads check ensures that the Elasticsearch process has the rights to create enough threads under normal use. This check is enforced only on Linux. If you are on Linux, to pass the maximum number of threads check, you must configure your system to allow the Elasticsearch process the ability to create at least 4096 threads. This can be done via `/etc/security/limits.conf` using the `nproc` setting (note that you might have to increase the limits for the `root` user too). +Elasticsearch executes requests by breaking the request down into stages and handing those stages off to different thread pool executors. There are different [thread pool executors](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/thread-pool-settings.md) for a variety of tasks within Elasticsearch. Thus, Elasticsearch needs the ability to create a lot of threads. The maximum number of threads check ensures that the Elasticsearch process has the rights to create enough threads under normal use. This check is enforced only on Linux. If you are on Linux, to pass the maximum number of threads check, you must configure your system to allow the Elasticsearch process the ability to create at least 4096 threads. This can be done via `/etc/security/limits.conf` using the `nproc` setting (note that you might have to increase the limits for the `root` user too). diff --git a/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md b/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md index 4c792228a2..c554d7a342 100644 --- a/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md +++ b/deploy-manage/deploy/self-managed/networkaddress-cache-ttl.md @@ -5,5 +5,5 @@ mapped_pages: # DNS cache settings [networkaddress-cache-ttl] -Elasticsearch runs with a security manager in place. With a security manager in place, the JVM defaults to caching positive hostname resolutions indefinitely and defaults to caching negative hostname resolutions for ten seconds. Elasticsearch overrides this behavior with default values to cache positive lookups for sixty seconds, and to cache negative lookups for ten seconds. These values should be suitable for most environments, including environments where DNS resolutions vary with time. If not, you can edit the values `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl` in the [JVM options](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-options). Note that the values [`networkaddress.cache.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.md) and [`networkaddress.cache.negative.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.md) in the [Java security policy](https://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.md) are ignored by Elasticsearch unless you remove the settings for `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl`. +Elasticsearch runs with a security manager in place. With a security manager in place, the JVM defaults to caching positive hostname resolutions indefinitely and defaults to caching negative hostname resolutions for ten seconds. Elasticsearch overrides this behavior with default values to cache positive lookups for sixty seconds, and to cache negative lookups for ten seconds. These values should be suitable for most environments, including environments where DNS resolutions vary with time. 
If not, you can edit the values `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl` in the [JVM options](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options). Note that the values [`networkaddress.cache.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html) and [`networkaddress.cache.negative.ttl=`](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html) in the [Java security policy](https://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html) are ignored by Elasticsearch unless you remove the settings for `es.networkaddress.cache.ttl` and `es.networkaddress.cache.negative.ttl`. diff --git a/deploy-manage/deploy/self-managed/plugins.md b/deploy-manage/deploy/self-managed/plugins.md index 4e294e857f..856188b50d 100644 --- a/deploy-manage/deploy/self-managed/plugins.md +++ b/deploy-manage/deploy/self-managed/plugins.md @@ -9,5 +9,5 @@ Plugins are a way to enhance the basic Elasticsearch functionality in a custom m For information about selecting and installing plugins, see [{{es}} Plugins and Integrations](https://www.elastic.co/guide/en/elasticsearch/plugins/current/index.html). -For information about developing your own plugin, see [Help for plugin authors](https://www.elastic.co/guide/en/elasticsearch/plugins/current/plugin-authors.html). +For information about developing your own plugin, see [Help for plugin authors](asciidocalypse://docs/elasticsearch/docs/extend/create-elasticsearch-plugins/index.md). diff --git a/deploy-manage/deploy/self-managed/system-config-tcpretries.md b/deploy-manage/deploy/self-managed/system-config-tcpretries.md index c57074af13..8c939705b8 100644 --- a/deploy-manage/deploy/self-managed/system-config-tcpretries.md +++ b/deploy-manage/deploy/self-managed/system-config-tcpretries.md @@ -5,7 +5,7 @@ mapped_pages: # TCP retransmission timeout [system-config-tcpretries] -Each pair of {{es}} nodes communicates via a number of TCP connections which [remain open](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#long-lived-connections) until one of the nodes shuts down or communication between the nodes is disrupted by a failure in the underlying infrastructure. +Each pair of {{es}} nodes communicates via a number of TCP connections which [remain open](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#long-lived-connections) until one of the nodes shuts down or communication between the nodes is disrupted by a failure in the underlying infrastructure. TCP provides reliable communication over occasionally unreliable networks by hiding temporary network disruptions from the communicating applications. Your operating system will retransmit any lost messages a number of times before informing the sender of any problem. {{es}} must wait while the retransmissions are happening and can only react once the operating system decides to give up. Users must therefore also wait for a sequence of retransmissions to complete.
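On Linux, the retransmission behavior can be tuned so that the operating system gives up, and {{es}} can react, much sooner. The following is a minimal sketch: `net.ipv4.tcp_retries2` is the standard Linux kernel setting, a value of `5` is commonly recommended for {{es}} clusters, and the drop-in file name is illustrative.

```sh
# Inspect the current retransmission limit.
sysctl net.ipv4.tcp_retries2

# Lower the limit at runtime; about 5 retries means the kernel gives up
# after roughly six seconds instead of many minutes.
sudo sysctl -w net.ipv4.tcp_retries2=5

# Persist the change across reboots (the file name is illustrative).
echo "net.ipv4.tcp_retries2=5" | sudo tee /etc/sysctl.d/99-elasticsearch.conf
```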
diff --git a/deploy-manage/deploy/self-managed/vm-max-map-count.md b/deploy-manage/deploy/self-managed/vm-max-map-count.md index 29c8964e7f..bc5e646378 100644 --- a/deploy-manage/deploy/self-managed/vm-max-map-count.md +++ b/deploy-manage/deploy/self-managed/vm-max-map-count.md @@ -5,7 +5,7 @@ mapped_pages: # Virtual memory [vm-max-map-count] -Elasticsearch uses a [`mmapfs`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html#mmapfs) directory by default to store its indices. The default operating system limits on mmap counts is likely to be too low, which may result in out of memory exceptions. +Elasticsearch uses a [`mmapfs`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-store-settings.md#mmapfs) directory by default to store its indices. The default operating system limits on mmap counts are likely to be too low, which may result in out of memory exceptions. On Linux, you can increase the limits by running the following command as `root`: diff --git a/deploy-manage/distributed-architecture.md b/deploy-manage/distributed-architecture.md index 0800fda4d2..a26c9a6de9 100644 --- a/deploy-manage/distributed-architecture.md +++ b/deploy-manage/distributed-architecture.md @@ -16,5 +16,5 @@ The topics in this section provides information about the architecture of {{es}} * [Shard allocation awareness](distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md): Learn how to use custom node attributes to distribute shards across different racks or availability zones. -* [Shard request cache](https://www.elastic.co/guide/en/elasticsearch/reference/current/shard-request-cache.html): Learn how {{es}} caches search requests to improve performance. +* [Shard request cache](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/shard-request-cache-settings.md): Learn how {{es}} caches search requests to improve performance. diff --git a/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md b/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md index 9b2722ad2d..4041217400 100644 --- a/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md +++ b/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md @@ -5,7 +5,7 @@ mapped_pages: # Node roles [node-roles-overview] -Any time that you start an instance of {{es}}, you are starting a *node*. A collection of connected nodes is called a [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html). If you are running a single node of {{es}}, then you have a cluster of one node. All nodes know about all the other nodes in the cluster and can forward client requests to the appropriate node. +Any time that you start an instance of {{es}}, you are starting a *node*. A collection of connected nodes is called a [cluster](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md). If you are running a single node of {{es}}, then you have a cluster of one node. All nodes know about all the other nodes in the cluster and can forward client requests to the appropriate node. Each node performs one or more roles. Roles control the behavior of the node in the cluster.
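As a hedged sketch of how roles are declared and inspected, assuming an archive installation, the auto-configured `elastic` superuser, and its password stored in the `ELASTIC_PASSWORD` environment variable:

```sh
# Declare this node's roles in elasticsearch.yml before it starts; an empty
# list ([]) would instead create a dedicated coordinating-only node.
cat >> config/elasticsearch.yml <<'EOF'
node.roles: [ master, data ]
EOF

# Once the node is running, list every node with its abbreviated role codes.
curl --cacert config/certs/http_ca.crt -u elastic:$ELASTIC_PASSWORD \
  "https://localhost:9200/_cat/nodes?v&h=name,node.role"
```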
@@ -61,14 +61,14 @@ Similarly, each master-eligible node maintains the following data on disk: * the index metadata for every index in the cluster, and * the cluster-wide metadata, such as settings and index templates. -Each node checks the contents of its data path at startup. If it discovers unexpected data then it will refuse to start. This is to avoid importing unwanted [dangling indices](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html#dangling-indices) which can lead to a red cluster health. To be more precise, nodes without the `data` role will refuse to start if they find any shard data on disk at startup, and nodes without both the `master` and `data` roles will refuse to start if they have any index metadata on disk at startup. +Each node checks the contents of its data path at startup. If it discovers unexpected data then it will refuse to start. This is to avoid importing unwanted [dangling indices](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/local-gateway.md#dangling-indices) which can lead to a red cluster health. To be more precise, nodes without the `data` role will refuse to start if they find any shard data on disk at startup, and nodes without both the `master` and `data` roles will refuse to start if they have any index metadata on disk at startup. It is possible to change the roles of a node by adjusting its `elasticsearch.yml` file and restarting it. This is known as *repurposing* a node. In order to satisfy the checks for unexpected data described above, you must perform some extra steps to prepare a node for repurposing when starting the node without the `data` or `master` roles. * If you want to repurpose a data node by removing the `data` role then you should first use an [allocation filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#cluster-shard-allocation-filtering) to safely migrate all the shard data onto other nodes in the cluster. * If you want to repurpose a node to have neither the `data` nor `master` roles then it is simplest to start a brand-new node with an empty data path and the desired roles. You may find it safest to use an [allocation filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#cluster-shard-allocation-filtering) to migrate the shard data elsewhere in the cluster first. -If it is not possible to follow these extra steps then you may be able to use the [`elasticsearch-node repurpose`](https://www.elastic.co/guide/en/elasticsearch/reference/current/node-tool.html#node-tool-repurpose) tool to delete any excess data that prevents a node from starting. +If it is not possible to follow these extra steps then you may be able to use the [`elasticsearch-node repurpose`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/node-tool.md#node-tool-repurpose) tool to delete any excess data that prevents a node from starting. ## Available node roles [node-roles-list] @@ -79,7 +79,7 @@ The following is a list of the roles that a node can perform in a cluster. A nod * [Data node](#data-node-role) (`data`, `data_content`, `data_hot`, `data_warm`, `data_cold`, `data_frozen`): A node that has one of several data roles. Data nodes hold data and perform data related operations such as CRUD, search, and aggregations. You might use multiple data roles in a cluster so you can implement [data tiers](../../../manage-data/lifecycle/data-tiers.md). 
* [Ingest node](#node-ingest-node) (`ingest`): Ingest nodes are able to apply an [ingest pipeline](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md) to a document in order to transform and enrich the document before indexing. With a heavy ingest load, it makes sense to use dedicated ingest nodes and to exclude the `ingest` role from nodes that have the `master` or `data` roles. * [Remote-eligible node](#remote-node) (`remote_cluster_client`): A node that is eligible to act as a remote client. -* [Machine learning node](#ml-node-role) (`ml`): A node that can run {{ml-features}}. If you want to use {{ml-features}}, there must be at least one {{ml}} node in your cluster. For more information, see [Machine learning settings](../../deploy/self-managed/configure-elasticsearch.md) and [Machine learning in the {{stack}}](https://www.elastic.co/guide/en/machine-learning/current/index.html). +* [Machine learning node](#ml-node-role) (`ml`): A node that can run {{ml-features}}. If you want to use {{ml-features}}, there must be at least one {{ml}} node in your cluster. For more information, see [Machine learning settings](../../deploy/self-managed/configure-elasticsearch.md) and [Machine learning in the {{stack}}](/explore-analyze/machine-learning.md). * [{{transform-cap}} node](#transform-node-role) (`transform`): A node that can perform {{transforms}}. If you want to use {{transforms}}, there must be at least one {{transform}} node in your cluster. For more information, see [{{transforms-cap}} settings](../../deploy/self-managed/configure-elasticsearch.md) and [*Transforming data*](../../../explore-analyze/transforms.md). ::::{admonition} Coordinating node @@ -222,7 +222,7 @@ node.roles: [ data_warm ] Cold data nodes are part of the cold tier. When you no longer need to search time series data regularly, it can move from the warm tier to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. -For better storage savings, you can keep [fully mounted indices](../../tools/snapshot-and-restore/searchable-snapshots.md#fully-mounted) of [{{search-snaps}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-searchable-snapshot.html) on the cold tier. Unlike regular indices, these fully mounted indices don’t require replicas for reliability. In the event of a failure, they can recover data from the underlying snapshot instead. This potentially halves the local storage needed for the data. A snapshot repository is required to use fully mounted indices in the cold tier. Fully mounted indices are read-only. +For better storage savings, you can keep [fully mounted indices](../../tools/snapshot-and-restore/searchable-snapshots.md#fully-mounted) of [{{search-snaps}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-searchable-snapshot.md) on the cold tier. Unlike regular indices, these fully mounted indices don’t require replicas for reliability. In the event of a failure, they can recover data from the underlying snapshot instead. This potentially halves the local storage needed for the data. A snapshot repository is required to use fully mounted indices in the cold tier. Fully mounted indices are read-only. Alternatively, you can use the cold tier to store regular indices with replicas instead of using {{search-snaps}}. This lets you store older data on less expensive hardware but doesn’t reduce required disk space compared to the warm tier.
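For illustration, a fully mounted index is created with the mount API. In this sketch the repository, snapshot, and index names are placeholders, and the request assumes the auto-configured `elastic` superuser with its password in `ELASTIC_PASSWORD`:

```sh
# Mount an index from an existing snapshot as a fully mounted, read-only
# index; storage=full_copy keeps a complete local copy of each shard.
curl -X POST --cacert config/certs/http_ca.crt -u elastic:$ELASTIC_PASSWORD \
  "https://localhost:9200/_snapshot/my_repository/my_snapshot/_mount?storage=full_copy&wait_for_completion=true" \
  -H 'Content-Type: application/json' -d'
{
  "index": "my-index"
}'
```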
diff --git a/deploy-manage/distributed-architecture/discovery-cluster-formation.md b/deploy-manage/distributed-architecture/discovery-cluster-formation.md index 417923af1e..ef0a749996 100644 --- a/deploy-manage/distributed-architecture/discovery-cluster-formation.md +++ b/deploy-manage/distributed-architecture/discovery-cluster-formation.md @@ -30,7 +30,7 @@ The following processes and settings are part of discovery and cluster formation [Cluster fault detection](discovery-cluster-formation/cluster-fault-detection.md) : {{es}} performs health checks to detect and remove faulty nodes. -[Settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-settings.html) +[Settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md) : There are settings that enable users to influence the discovery, cluster formation, master election and fault detection processes. diff --git a/deploy-manage/distributed-architecture/discovery-cluster-formation/cluster-fault-detection.md b/deploy-manage/distributed-architecture/discovery-cluster-formation/cluster-fault-detection.md index 433695a4d4..fe72712b51 100644 --- a/deploy-manage/distributed-architecture/discovery-cluster-formation/cluster-fault-detection.md +++ b/deploy-manage/distributed-architecture/discovery-cluster-formation/cluster-fault-detection.md @@ -7,7 +7,7 @@ mapped_pages: The elected master periodically checks each of the nodes in the cluster to ensure that they are still connected and healthy. Each node in the cluster also periodically checks the health of the elected master. These checks are known respectively as *follower checks* and *leader checks*. -Elasticsearch allows these checks to occasionally fail or timeout without taking any action. It considers a node to be faulty only after a number of consecutive checks have failed. You can control fault detection behavior with [`cluster.fault_detection.*` settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-settings.html). +Elasticsearch allows these checks to occasionally fail or time out without taking any action. It considers a node to be faulty only after a number of consecutive checks have failed. You can control fault detection behavior with [`cluster.fault_detection.*` settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md). If the elected master detects that a node has disconnected, however, this situation is treated as an immediate failure. The master bypasses the timeout and retry setting values and attempts to remove the node from the cluster. Similarly, if a node detects that the elected master has disconnected, this situation is treated as an immediate failure. The node bypasses the timeout and retry settings and restarts its discovery phase to try and find or elect a new master.
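The `cluster.fault_detection.*` settings are static node settings, so they belong in `elasticsearch.yml` rather than the cluster settings API. A minimal sketch follows; the values shown are simply the documented defaults, written out for visibility:

```sh
cat >> config/elasticsearch.yml <<'EOF'
# How often the elected master checks each follower, how long each check
# may take, and how many consecutive failures mark a node as faulty.
cluster.fault_detection.follower_check.interval: 1s
cluster.fault_detection.follower_check.timeout: 10s
cluster.fault_detection.follower_check.retry_count: 3
EOF
```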
diff --git a/deploy-manage/distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md b/deploy-manage/distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md index d85653b6a2..2316afa779 100644 --- a/deploy-manage/distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md +++ b/deploy-manage/distributed-architecture/discovery-cluster-formation/discovery-hosts-providers.md @@ -19,7 +19,7 @@ Refer to [Troubleshooting discovery](../../../troubleshoot/elasticsearch/discove ## Seed hosts providers [built-in-hosts-providers] -By default the cluster formation module offers two seed hosts providers to configure the list of seed nodes: a *settings*-based and a *file*-based seed hosts provider. It can be extended to support cloud environments and other forms of seed hosts providers via [discovery plugins](https://www.elastic.co/guide/en/elasticsearch/plugins/current/discovery.html). Seed hosts providers are configured using the `discovery.seed_providers` setting, which defaults to the *settings*-based hosts provider. This setting accepts a list of different providers, allowing you to make use of multiple ways to find the seed hosts for your cluster. +By default the cluster formation module offers two seed hosts providers to configure the list of seed nodes: a *settings*-based and a *file*-based seed hosts provider. It can be extended to support cloud environments and other forms of seed hosts providers via [discovery plugins](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/discovery-plugins.md). Seed hosts providers are configured using the `discovery.seed_providers` setting, which defaults to the *settings*-based hosts provider. This setting accepts a list of different providers, allowing you to make use of multiple ways to find the seed hosts for your cluster. Each seed hosts provider yields the IP addresses or hostnames of the seed nodes. If it returns any hostnames then these are resolved to IP addresses using a DNS lookup. If a hostname resolves to multiple IP addresses then {{es}} tries to find a seed node at all of these addresses. If the hosts provider does not explicitly give the TCP port of the node by then, it will implicitly use the first port in the port range given by `transport.profiles.default.port`, or by `transport.port` if `transport.profiles.default.port` is not set. The number of concurrent lookups is controlled by `discovery.seed_resolver.max_concurrent_resolvers` which defaults to `10`, and the timeout for each lookup is controlled by `discovery.seed_resolver.timeout` which defaults to `5s`. Note that DNS lookups are subject to [JVM DNS caching](../../deploy/self-managed/networkaddress-cache-ttl.md). @@ -75,16 +75,16 @@ You can also add comments to this file. All comments must appear on their lines #### EC2 hosts provider [ec2-hosts-provider] -The [EC2 discovery plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/discovery-ec2.html) adds a hosts provider that uses the [AWS API](https://github.com/aws/aws-sdk-java) to find a list of seed nodes. +The [EC2 discovery plugin](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/discovery-ec2.md) adds a hosts provider that uses the [AWS API](https://github.com/aws/aws-sdk-java) to find a list of seed nodes. 
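As a brief sketch of wiring this up on an archive installation (the plugin must be installed on every node, and installation requires a node restart):

```sh
# Install the EC2 discovery plugin, then enable its hosts provider.
sudo bin/elasticsearch-plugin install discovery-ec2

cat >> config/elasticsearch.yml <<'EOF'
discovery.seed_providers: ec2
EOF
```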
#### Azure Classic hosts provider [azure-classic-hosts-provider] -The [Azure Classic discovery plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/discovery-azure-classic.html) adds a hosts provider that uses the Azure Classic API find a list of seed nodes. +The [Azure Classic discovery plugin](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/discovery-azure-classic.md) adds a hosts provider that uses the Azure Classic API to find a list of seed nodes. #### Google Compute Engine hosts provider [gce-hosts-provider] -The [GCE discovery plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/discovery-gce.html) adds a hosts provider that uses the GCE API find a list of seed nodes. +The [GCE discovery plugin](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/discovery-gce.md) adds a hosts provider that uses the GCE API to find a list of seed nodes. diff --git a/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md b/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md index ae49a37f79..220436de15 100644 --- a/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md +++ b/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-bootstrap-cluster.md @@ -11,7 +11,7 @@ The initial set of master-eligible nodes is defined in the [`cluster.initial_mas * The [node name](../../deploy/self-managed/important-settings-configuration.md#node-name) of the node. * The node’s hostname if `node.name` is not set, because `node.name` defaults to the node’s hostname. You must use either the fully-qualified hostname or the bare hostname [depending on your system configuration](#modules-discovery-bootstrap-cluster-fqdns). -* The IP address of the node’s [transport publish address](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#modules-network-binding-publishing), if it is not possible to use the `node.name` of the node. This is normally the IP address to which [`network.host`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#common-network-settings) resolves but [this can be overridden](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#advanced-network-settings). +* The IP address of the node’s [transport publish address](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#modules-network-binding-publishing), if it is not possible to use the `node.name` of the node. This is normally the IP address to which [`network.host`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#common-network-settings) resolves but [this can be overridden](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#advanced-network-settings). * The IP address and port of the node’s publish address, in the form `IP:PORT`, if it is not possible to use the `node.name` of the node and there are multiple nodes sharing a single IP address. Do not set `cluster.initial_master_nodes` on master-ineligible nodes.
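For illustration, here is the setting written out with the example hostnames used elsewhere on this page; each entry must match the corresponding node's `node.name` exactly:

```sh
# Only on the initial set of master-eligible nodes, and only for the very
# first cluster bootstrap; remove this setting once the cluster has formed.
cat >> config/elasticsearch.yml <<'EOF'
cluster.initial_master_nodes:
  - master-a.example.com
  - master-b.example.com
EOF
```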
@@ -63,7 +63,7 @@ This message shows the node names `master-a.example.com` and `master-b.example.c ## Choosing a cluster name [bootstrap-cluster-name] -The [`cluster.name`](https://www.elastic.co/guide/en/elasticsearch/reference/current/misc-cluster-settings.html#cluster-name) setting enables you to create multiple clusters which are separated from each other. Nodes verify that they agree on their cluster name when they first connect to each other, and Elasticsearch will only form a cluster from nodes that all have the same cluster name. The default value for the cluster name is `elasticsearch`, but it is recommended to change this to reflect the logical name of the cluster. +The [`cluster.name`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-name) setting enables you to create multiple clusters which are separated from each other. Nodes verify that they agree on their cluster name when they first connect to each other, and Elasticsearch will only form a cluster from nodes that all have the same cluster name. The default value for the cluster name is `elasticsearch`, but it is recommended to change this to reflect the logical name of the cluster. ## Auto-bootstrapping in development mode [bootstrap-auto-bootstrap] @@ -84,7 +84,7 @@ Once an {{es}} node has joined an existing cluster, or bootstrapped a new cluste If you intended to add a node into an existing cluster but instead bootstrapped a separate single-node cluster then you must start again: 1. Shut down the node. -2. Completely wipe the node by deleting the contents of its [data folder](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#data-path). +2. Completely wipe the node by deleting the contents of its [data folder](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#data-path). 3. Configure `discovery.seed_hosts` or `discovery.seed_providers` and other relevant discovery settings. Ensure `cluster.initial_master_nodes` is not set on any node. 4. Restart the node and verify that it joins the existing cluster rather than forming its own one-node cluster. diff --git a/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md b/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md index 268d45786d..4481774ef2 100644 --- a/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md +++ b/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md @@ -24,7 +24,7 @@ After a master-eligible node has joined or left the cluster the elected master m ## Master elections [_master_elections] -Elasticsearch uses an election process to agree on an elected master node, both at startup and if the existing elected master fails. Any master-eligible node can start an election, and normally the first election that takes place will succeed. Elections only usually fail when two nodes both happen to start their elections at about the same time, so elections are scheduled randomly on each node to reduce the probability of this happening. Nodes will retry elections until a master is elected, backing off on failure, so that eventually an election will succeed (with arbitrarily high probability). 
The scheduling of master elections are controlled by the [master election settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-settings.html#master-election-settings). +Elasticsearch uses an election process to agree on an elected master node, both at startup and if the existing elected master fails. Any master-eligible node can start an election, and normally the first election that takes place will succeed. Elections only usually fail when two nodes both happen to start their elections at about the same time, so elections are scheduled randomly on each node to reduce the probability of this happening. Nodes will retry elections until a master is elected, backing off on failure, so that eventually an election will succeed (with arbitrarily high probability). The scheduling of master elections is controlled by the [master election settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md#master-election-settings). ## Cluster maintenance, rolling restarts and migrations [_cluster_maintenance_rolling_restarts_and_migrations] diff --git a/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-voting.md b/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-voting.md index cfa8d155a0..9c0c9229ec 100644 --- a/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-voting.md +++ b/deploy-manage/distributed-architecture/discovery-cluster-formation/modules-discovery-voting.md @@ -32,7 +32,7 @@ The current voting configuration is not necessarily the same as the set of all a Larger voting configurations are usually more resilient, so Elasticsearch normally prefers to add master-eligible nodes to the voting configuration after they join the cluster. Similarly, if a node in the voting configuration leaves the cluster and there is another master-eligible node in the cluster that is not in the voting configuration then it is preferable to swap these two nodes over. The size of the voting configuration is thus unchanged but its resilience increases. -It is not so straightforward to automatically remove nodes from the voting configuration after they have left the cluster. Different strategies have different benefits and drawbacks, so the right choice depends on how the cluster will be used. You can control whether the voting configuration automatically shrinks by using the [`cluster.auto_shrink_voting_configuration` setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-settings.html). +It is not so straightforward to automatically remove nodes from the voting configuration after they have left the cluster. Different strategies have different benefits and drawbacks, so the right choice depends on how the cluster will be used. You can control whether the voting configuration automatically shrinks by using the [`cluster.auto_shrink_voting_configuration` setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md). ::::{note} If `cluster.auto_shrink_voting_configuration` is set to `true` (which is the default and recommended value) and there are at least three master-eligible nodes in the cluster, Elasticsearch remains capable of processing cluster state updates as long as all but one of its master-eligible nodes are healthy.
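Because `cluster.auto_shrink_voting_configuration` is a dynamic setting, it can be changed on a running cluster. A hedged sketch, assuming the auto-configured `elastic` superuser and its password in `ELASTIC_PASSWORD`:

```sh
# Disable automatic shrinking of the voting configuration; sending null
# instead of false would restore the default (true).
curl -X PUT --cacert config/certs/http_ca.crt -u elastic:$ELASTIC_PASSWORD \
  "https://localhost:9200/_cluster/settings" \
  -H 'Content-Type: application/json' -d'
{
  "persistent": {
    "cluster.auto_shrink_voting_configuration": false
  }
}'
```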
diff --git a/deploy-manage/distributed-architecture/kibana-tasks-management.md b/deploy-manage/distributed-architecture/kibana-tasks-management.md index 9090653c04..04996e6dd5 100644 --- a/deploy-manage/distributed-architecture/kibana-tasks-management.md +++ b/deploy-manage/distributed-architecture/kibana-tasks-management.md @@ -26,7 +26,7 @@ If you lose this index, all scheduled alerts and actions are lost. {{kib}} background tasks are managed as follows: -* An {{es}} task index is polled for overdue tasks at 3-second intervals. You can change this interval using the [`xpack.task_manager.poll_interval`](https://www.elastic.co/guide/en/kibana/current/task-manager-settings-kb.html#task-manager-settings) setting. +* An {{es}} task index is polled for overdue tasks at 3-second intervals. You can change this interval using the [`xpack.task_manager.poll_interval`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/task-manager-settings.md#task-manager-settings) setting. * Tasks are claimed by updating them in the {{es}} index, using optimistic concurrency control to prevent conflicts. Each {{kib}} instance can run a maximum of 10 concurrent tasks, so a maximum of 10 tasks are claimed each interval. * Tasks are run on the {{kib}} server. * Task Manager ensures that tasks: diff --git a/deploy-manage/distributed-architecture/reading-and-writing-documents.md b/deploy-manage/distributed-architecture/reading-and-writing-documents.md index 248aa7b3a6..fc4a2409cc 100644 --- a/deploy-manage/distributed-architecture/reading-and-writing-documents.md +++ b/deploy-manage/distributed-architecture/reading-and-writing-documents.md @@ -41,7 +41,7 @@ These indexing stages (coordinating, primary, and replica) are sequential. To en Many things can go wrong during indexing — disks can get corrupted, nodes can be disconnected from each other, or some configuration mistake could cause an operation to fail on a replica despite it being successful on the primary. These are infrequent but the primary has to respond to them. -In the case that the primary itself fails, the node hosting the primary will send a message to the master about it. The indexing operation will wait (up to 1 minute, by [default](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-settings)) for the master to promote one of the replicas to be a new primary. The operation will then be forwarded to the new primary for processing. Note that the master also monitors the health of the nodes and may decide to proactively demote a primary. This typically happens when the node holding the primary is isolated from the cluster by a networking issue. See [here](#demoted-primary) for more details. +In the case that the primary itself fails, the node hosting the primary will send a message to the master about it. The indexing operation will wait (up to 1 minute, by [default](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#dynamic-index-settings)) for the master to promote one of the replicas to be a new primary. The operation will then be forwarded to the new primary for processing. Note that the master also monitors the health of the nodes and may decide to proactively demote a primary. This typically happens when the node holding the primary is isolated from the cluster by a networking issue. See [here](#demoted-primary) for more details. 
Once the operation has been successfully performed on the primary, the primary has to deal with potential failures when executing it on the replica shards. This may be caused by an actual failure on the replica or due to a network issue preventing the operation from reaching the replica (or preventing the replica from responding). All of these share the same end result: a replica which is part of the in-sync replica set misses an operation that is about to be acknowledged. In order to avoid violating the invariant, the primary sends a message to the master requesting that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start building a new shard copy in order to restore the system to a healthy state. @@ -62,7 +62,7 @@ Reads in Elasticsearch can be very lightweight lookups by ID or a heavy search r When a read request is received by a node, that node is responsible for forwarding it to the nodes that hold the relevant shards, collating the responses, and responding to the client. We call that node the *coordinating node* for that request. The basic flow is as follows: 1. Resolve the read requests to the relevant shards. Note that since most searches will be sent to one or more indices, they typically need to read from multiple shards, each representing a different subset of the data. -2. Select an active copy of each relevant shard, from the shard replication group. This can be either the primary or a replica. By default, {{es}} uses [adaptive replica selection](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shard-routing.html#search-adaptive-replica) to select the shard copies. +2. Select an active copy of each relevant shard, from the shard replication group. This can be either the primary or a replica. By default, {{es}} uses [adaptive replica selection](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/search-shard-routing.md#search-adaptive-replica) to select the shard copies. 3. Send shard level read requests to the selected copies. 4. Combine the results and respond. Note that in the case of get by ID look up, only one shard is relevant and this step can be skipped. diff --git a/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery.md b/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery.md index c7bd4171d7..0dcabbf44a 100644 --- a/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery.md +++ b/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery.md @@ -9,7 +9,7 @@ Each [index](../../manage-data/data-store/index-basics.md) in Elasticsearch is d A cluster can contain multiple copies of a shard. Each shard has one distinguished shard copy called the *primary*, and zero or more non-primary copies called *replicas*. The primary shard copy serves as the main entry point for all indexing operations. The operations on the primary shard copy are then forwarded to its replicas. -Replicas maintain redundant copies of your data across the [nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) in your cluster, protecting against hardware failure and increasing capacity to serve read requests like searching or retrieving a document. 
If the primary shard copy fails, then a replica is promoted to primary and takes over the primary’s responsibilities. +Replicas maintain redundant copies of your data across the [nodes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md) in your cluster, protecting against hardware failure and increasing capacity to serve read requests like searching or retrieving a document. If the primary shard copy fails, then a replica is promoted to primary and takes over the primary’s responsibilities. Over the course of normal operation, Elasticsearch allocates shard copies to nodes, relocates shard copies across nodes to balance the cluster or satisfy new allocation constraints, and recovers shards to initialize new copies. In this topic, you’ll learn how these operations work and how you can control them. @@ -30,7 +30,7 @@ By default, the primary and replica shard copies for an index can be allocated t You can control how shard copies are allocated using the following settings: -* [Cluster-level shard allocation settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html): Use these settings to control how shard copies are allocated and balanced across the entire cluster. For example, you might want to [allocate nodes availability zones](shard-allocation-relocation-recovery/shard-allocation-awareness.md), or prevent certain nodes from being used so you can perform maintenance. +* [Cluster-level shard allocation settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md): Use these settings to control how shard copies are allocated and balanced across the entire cluster. For example, you might want to [allocate nodes across availability zones](shard-allocation-relocation-recovery/shard-allocation-awareness.md), or prevent certain nodes from being used so you can perform maintenance. * [Index-level shard allocation settings](shard-allocation-relocation-recovery/index-level-shard-allocation.md): Use these settings to control how the shard copies for a specific index are allocated. For example, you might want to allocate an index to a node in a specific data tier, or to a node with specific attributes.
@@ -67,7 +67,7 @@ You can determine the cause of a shard recovery using the [recovery](https://www To control how shards are recovered, for example the resources that can be used by recovery operations, and which indices should be prioritized for recovery, you can adjust the following settings: -* [Index recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html) +* [Index recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md) * [Cluster-level shard allocation settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html) * [Index-level shard allocation settings](shard-allocation-relocation-recovery/index-level-shard-allocation.md), including [delayed allocation](shard-allocation-relocation-recovery/delaying-allocation-when-node-leaves.md) and [index recovery prioritization](shard-allocation-relocation-recovery/index-level-shard-allocation.md) diff --git a/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/delaying-allocation-when-node-leaves.md b/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/delaying-allocation-when-node-leaves.md index 64a81b3f80..29e21ced5e 100644 --- a/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/delaying-allocation-when-node-leaves.md +++ b/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/delaying-allocation-when-node-leaves.md @@ -13,7 +13,7 @@ When a node leaves the cluster for whatever reason, intentional or otherwise, th These actions are intended to protect the cluster against data loss by ensuring that every shard is fully replicated as soon as possible. -Even though we throttle concurrent recoveries both at the [node level](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html) and at the [cluster level](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#cluster-shard-allocation-settings), this shard-shuffle can still put a lot of extra load on the cluster which may not be necessary if the missing node is likely to return soon. Imagine this scenario: +Even though we throttle concurrent recoveries both at the [node level](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md) and at the [cluster level](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md#cluster-shard-allocation-settings), this shard-shuffle can still put a lot of extra load on the cluster which may not be necessary if the missing node is likely to return soon. Imagine this scenario: * Node 5 loses network connectivity. * The master promotes a replica shard to primary for each primary that was on Node 5. diff --git a/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md b/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md index 9476c24413..495199f72f 100644 --- a/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md +++ b/deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md @@ -23,7 +23,7 @@ Learn more about [designing resilient clusters](../../production-guidance/availa To enable shard allocation awareness: -1. 
Specify the location of each node with a [custom node attribute](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#custom-node-attributes). For example, if you want Elasticsearch to distribute shards across different racks, you might use an awareness attribute called `rack_id`. +1. Specify the location of each node with a [custom node attribute](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#custom-node-attributes). For example, if you want Elasticsearch to distribute shards across different racks, you might use an awareness attribute called `rack_id`. You can set custom attributes in two ways: diff --git a/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md b/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md index 858393b6f2..2c365f4695 100644 --- a/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md +++ b/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes.md @@ -13,7 +13,7 @@ If you are running a single instance of {{es}}, you have a cluster of one node. :alt: A cluster with one node and three primary shards ::: -You add nodes to a cluster to increase its capacity and reliability. By default, a node is both a data node and eligible to be elected as the master node that controls the cluster. You can also configure a new node for a specific purpose, such as handling ingest requests. For more information, see [Nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html). +You add nodes to a cluster to increase its capacity and reliability. By default, a node is both a data node and eligible to be elected as the master node that controls the cluster. You can also configure a new node for a specific purpose, such as handling ingest requests. For more information, see [Nodes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md). When you add more nodes to a cluster, it automatically allocates replica shards. When all primary and replica shards are active, the cluster state changes to green. @@ -36,11 +36,11 @@ When {{es}} starts for the first time, the security auto-configuration process b Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. -Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) for more information. +Additionally, only nodes on the same host can join the cluster without additional configuration. 
If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) for more information. To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster. -1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool to generate an enrollment token for your new nodes. +1. In a separate terminal from where {{es}} is running, navigate to the directory where you installed {{es}} and run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool to generate an enrollment token for your new nodes. ```sh bin\elasticsearch-create-enrollment-token -s node @@ -62,7 +62,7 @@ To enroll new nodes in your cluster, create an enrollment token with the `elasti 3. Repeat the previous step for any new nodes that you want to enroll. -For more information about discovery and shard allocation, refer to [*Discovery and cluster formation*](../distributed-architecture/discovery-cluster-formation.md) and [Cluster-level shard allocation and routing settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html). +For more information about discovery and shard allocation, refer to [*Discovery and cluster formation*](../distributed-architecture/discovery-cluster-formation.md) and [Cluster-level shard allocation and routing settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md). ## Master-eligible nodes [add-elasticsearch-nodes-master-eligible] @@ -115,7 +115,7 @@ Adding an exclusion for a node creates an entry for that node in the voting conf GET /_cluster/state?filter_path=metadata.cluster_coordination.voting_config_exclusions ``` -This list is limited in size by the `cluster.max_voting_config_exclusions` setting, which defaults to `10`. See [Discovery and cluster formation settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-settings.html). Since voting configuration exclusions are persistent and limited in number, they must be cleaned up. Normally an exclusion is added when performing some maintenance on the cluster, and the exclusions should be cleaned up when the maintenance is complete. Clusters should have no voting configuration exclusions in normal operation. +This list is limited in size by the `cluster.max_voting_config_exclusions` setting, which defaults to `10`. See [Discovery and cluster formation settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md). 
Since voting configuration exclusions are persistent and limited in number, they must be cleaned up. Normally an exclusion is added when performing some maintenance on the cluster, and the exclusions should be cleaned up when the maintenance is complete. Clusters should have no voting configuration exclusions in normal operation. If a node is excluded from the voting configuration because it is to be shut down permanently, its exclusion can be removed after it is shut down and removed from the cluster. Exclusions can also be cleared if they were created in error or were only required temporarily by specifying `?wait_for_removal=false`. diff --git a/deploy-manage/maintenance/start-stop-services/full-cluster-restart-rolling-restart-procedures.md b/deploy-manage/maintenance/start-stop-services/full-cluster-restart-rolling-restart-procedures.md index d3d70b8f51..78f32ee9e4 100644 --- a/deploy-manage/maintenance/start-stop-services/full-cluster-restart-rolling-restart-procedures.md +++ b/deploy-manage/maintenance/start-stop-services/full-cluster-restart-rolling-restart-procedures.md @@ -8,7 +8,7 @@ mapped_pages: There may be [situations where you want to perform a full-cluster restart](../../security/secure-cluster-communications.md) or a rolling restart. In the case of [full-cluster restart](#restart-cluster-full), you shut down and restart all the nodes in the cluster while in the case of [rolling restart](#restart-cluster-rolling), you shut down only one node at a time, so the service remains uninterrupted. ::::{warning} -Nodes exceeding the low watermark threshold will be slow to restart. Reduce the disk usage below the [low watermark](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#cluster-routing-watermark-low) before restarting nodes. +Nodes exceeding the low watermark threshold will be slow to restart. Reduce the disk usage below the [low watermark](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md#cluster-routing-watermark-low) before restarting nodes. :::: @@ -29,7 +29,7 @@ Nodes exceeding the low watermark threshold will be slow to restart. Reduce the } ``` - You can also consider [gateway settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html) when restarting large clusters to reduce initial strain while nodes are processing [through discovery](../../distributed-architecture/discovery-cluster-formation.md). + You can also consider [gateway settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/local-gateway.md) when restarting large clusters to reduce initial strain while nodes are processing [through discovery](../../distributed-architecture/discovery-cluster-formation.md). 2. **Stop indexing and perform a flush.** @@ -54,7 +54,7 @@ Nodes exceeding the low watermark threshold will be slow to restart. Reduce the When you disable upgrade mode, the jobs resume using the last model state that was automatically saved. This option avoids the overhead of managing active jobs during the shutdown and is faster than explicitly stopping {{dfeeds}} and closing jobs. - * [Stop all {{dfeeds}} and close all jobs](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-close-job). This option saves the model state at the time of closure. When you reopen the jobs after the cluster restart, they use the exact same model. 
However, saving the latest model state takes longer than using upgrade mode, especially if you have a lot of jobs or jobs with large model states. + * [Stop all {{dfeeds}} and close all jobs](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-close-job). This option saves the model state at the time of closure. When you reopen the jobs after the cluster restart, they use the exact same model. However, saving the latest model state takes longer than using upgrade mode, especially if you have a lot of jobs or jobs with large model states. 2. **Shut down all nodes.** diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md index 99f6563463..3216e9edc3 100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md @@ -25,4 +25,6 @@ $$$start-rpm$$$ $$$_enroll_nodes_in_an_existing_cluster_3$$$ -$$$start-es-deb-systemd$$$ \ No newline at end of file +$$$start-es-deb-systemd$$$ + +$$$fatal-errors$$$ \ No newline at end of file diff --git a/deploy-manage/maintenance/start-stop-services/start-stop-kibana.md b/deploy-manage/maintenance/start-stop-services/start-stop-kibana.md index 8600febf36..ddf689434b 100644 --- a/deploy-manage/maintenance/start-stop-services/start-stop-kibana.md +++ b/deploy-manage/maintenance/start-stop-services/start-stop-kibana.md @@ -30,7 +30,7 @@ If this is the first time you’re starting {{kib}}, this command generates a un 3. Log in to {{kib}} as the `elastic` user with the password that was generated when you started {{es}}. ::::{note} -If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool. These tools are available in the {{es}} `bin` directory. +If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. :::: diff --git a/deploy-manage/manage-spaces.md b/deploy-manage/manage-spaces.md index a39994eb69..1cb6b9ef60 100644 --- a/deploy-manage/manage-spaces.md +++ b/deploy-manage/manage-spaces.md @@ -55,7 +55,7 @@ To go to **Spaces**, find **Stack Management** in the navigation menu or use the The maximum number of spaces that you can have differs by deployment type: * **Serverless projects:** Maximum of 100 spaces. -* **{{stack}} deployments:** Controlled by the `xpack.spaces.maxSpaces` setting. Default is 1000. View the full list of Space settings in [this document](https://www.elastic.co/guide/en/kibana/current/spaces-settings-kb.html). +* **{{stack}} deployments:** Controlled by the `xpack.spaces.maxSpaces` setting. Default is 1000. 
View the full list of Space settings in [the spaces settings reference](asciidocalypse://docs/kibana/docs/reference/configuration-reference/spaces-settings.md).

To create a space:

@@ -147,7 +147,7 @@ serverless: unavailable

You can create a custom experience for users by configuring the {{kib}} landing page on a per-space basis. The landing page can route users to a specific dashboard, application, or saved object as they enter each space.

-To configure the landing page, use the default route setting in [Stack Management > {{kib}} > Advanced settings](https://www.elastic.co/guide/en/kibana/current/advanced-options.html#kibana-general-settings). For example, you might set the default route to `/app/dashboards`.
+To configure the landing page, use the default route setting in [Stack Management > {{kib}} > Advanced settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md#kibana-general-settings). For example, you might set the default route to `/app/dashboards`.

:::{image} ../images/kibana-spaces-configure-landing-page.png
:alt: Configure space-level landing page

diff --git a/deploy-manage/monitor/kibana-task-manager-health-monitoring.md b/deploy-manage/monitor/kibana-task-manager-health-monitoring.md
index 1151b591fb..bc84435eae 100644
--- a/deploy-manage/monitor/kibana-task-manager-health-monitoring.md
+++ b/deploy-manage/monitor/kibana-task-manager-health-monitoring.md
@@ -33,7 +33,7 @@ Monitoring the `_health` endpoint of each {{kib}} instance in the cluster is the

The health monitoring API monitors the performance of Task Manager out of the box. However, certain performance considerations are deployment specific and you can configure them.

-A health threshold is the threshold for failed task executions. Once a task exceeds this threshold, a status of `warn` or `error` is set on the task type execution. To configure a health threshold, use the [`xpack.task_manager.monitored_task_execution_thresholds`](https://www.elastic.co/guide/en/kibana/current/task-manager-settings-kb.html#task-manager-health-settings) setting. You can apply this this setting to all task types in the system, or to a custom task type.
+A health threshold is the threshold for failed task executions. Once a task exceeds this threshold, a status of `warn` or `error` is set on the task type execution. To configure a health threshold, use the [`xpack.task_manager.monitored_task_execution_thresholds`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/task-manager-settings.md#task-manager-health-settings) setting. You can apply this setting to all task types in the system, or to a custom task type.

By default, this setting marks the health of every task type as `warning` when it exceeds 80% failed executions, and as `error` at 90%. Set this value to a number between 0 and 100. The threshold is hit when the value **exceeds** this number. To avoid a status of `error`, set the threshold at 100. To hit `error` the moment any task fails, set the threshold to 0.
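As a sketch, the configuration in `kibana.yml` could look like the following (the thresholds and the custom task type name are illustrative):

```yaml
xpack.task_manager.monitored_task_execution_thresholds:
  # Applies to every task type without a custom override.
  default:
    error_threshold: 90
    warn_threshold: 80
  # Override for a single task type (name is illustrative).
  custom:
    "alerting:.index-threshold":
      error_threshold: 50
      warn_threshold: 0
```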
diff --git a/deploy-manage/monitor/logging-configuration/auditing-search-queries.md b/deploy-manage/monitor/logging-configuration/auditing-search-queries.md
index 6c609be5b3..b9d48ed094 100644
--- a/deploy-manage/monitor/logging-configuration/auditing-search-queries.md
+++ b/deploy-manage/monitor/logging-configuration/auditing-search-queries.md
@@ -38,7 +38,7 @@ Not all events contain the `request.body` attribute, even when the above setting
* `run_as_denied`
* `anonymous_access_denied`

-The `request.body` attribute is printed on the coordinating node only (the node that handles the REST request). Most of these event types are [not included by default](https://www.elastic.co/guide/en/elasticsearch/reference/current/auditing-settings.html#xpack-sa-lf-events-include).
+The `request.body` attribute is printed on the coordinating node only (the node that handles the REST request). Most of these event types are [not included by default](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/auding-settings.md#xpack-sa-lf-events-include).

A good practical piece of advice is to add `authentication_success` to the event types that are audited (add it to the list in the `xpack.security.audit.logfile.events.include`), as this event type is not audited by default.

diff --git a/deploy-manage/monitor/logging-configuration/configuring-audit-logs.md b/deploy-manage/monitor/logging-configuration/configuring-audit-logs.md
index 03f84ee07b..6f5469d74d 100644
--- a/deploy-manage/monitor/logging-configuration/configuring-audit-logs.md
+++ b/deploy-manage/monitor/logging-configuration/configuring-audit-logs.md
@@ -15,7 +15,7 @@ When auditing security events, a single client request might generate multiple a

 {{es}} configuration options include:

- * [{{es}} audited events settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/auditing-settings.html#event-audit-settings): Use include and exclude filters to control the types of events that get logged.
+ * [{{es}} audited events settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/auding-settings.md#event-audit-settings): Use include and exclude filters to control the types of events that get logged.
 * [{{es}} node information settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/auditing-settings.html#node-audit-settings): Control whether to add or hide node information such as hostname or IP address in the audited events.
 * [{{es}} ignore policies settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/auditing-settings.html#audit-event-ignore-policies): Use ignore policies for fine-grained control over which audit events are printed to the log file.

@@ -29,7 +29,7 @@ For a complete description of event details and format, refer to the following r

### Kibana auditing configuration

-To control the logs that are outputted by Kibana, you can use [{{kib}} ignore filters](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#audit-logging-ignore-filters). These are a list of filters that determine which events should be excluded from the audit log.
+To control the logs that {{kib}} outputs, you can use [{{kib}} ignore filters](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md#audit-logging-ignore-filters). These filters determine which events should be excluded from the audit log.
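A minimal sketch of such filters in `kibana.yml` (the filter values are illustrative, not recommendations):

```yaml
xpack.security.audit:
  enabled: true
  ignore_filters:
    # Drop noisy HTTP request events from the audit log.
    - actions: ["http_request"]
    # Exclude events that occurred in a particular space.
    - spaces: ["dev-sandbox"]
```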
In self-managed systems, you can optionally configure the audit log location, and the file/rolling-file appenders, using [{{kib}} audit logging settings](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#audit-logging-settings).

diff --git a/deploy-manage/monitor/logging-configuration/kibana-log-settings-examples.md b/deploy-manage/monitor/logging-configuration/kibana-log-settings-examples.md
index 2263461211..e00568f381 100644
--- a/deploy-manage/monitor/logging-configuration/kibana-log-settings-examples.md
+++ b/deploy-manage/monitor/logging-configuration/kibana-log-settings-examples.md
@@ -29,7 +29,7 @@ logging:

## Log in JSON format [log-in-json-ECS-example]

-Log the default log format to JSON layout instead of pattern (the default). With `json` layout, log messages will be formatted as JSON strings in [ECS format](https://www.elastic.co/guide/en/ecs/current/ecs-reference.html) that includes a timestamp, log level, logger, message text and any other metadata that may be associated with the log message itself.
+Log the default log format to JSON layout instead of pattern (the default). With `json` layout, log messages will be formatted as JSON strings in [ECS format](asciidocalypse://docs/ecs/docs/reference/ecs/index.md) that includes a timestamp, log level, logger, message text and any other metadata that may be associated with the log message itself.

```yaml
logging:

diff --git a/deploy-manage/monitor/logging-configuration/kibana-logging.md b/deploy-manage/monitor/logging-configuration/kibana-logging.md
index 8118fcab54..76fcddc6a4 100644
--- a/deploy-manage/monitor/logging-configuration/kibana-logging.md
+++ b/deploy-manage/monitor/logging-configuration/kibana-logging.md
@@ -98,7 +98,7 @@ The pattern layout also offers a `highlight` option that allows you to highlight

### JSON layout [json-layout]

-With `json` layout log messages will be formatted as JSON strings in [ECS format](https://www.elastic.co/guide/en/ecs/current/ecs-reference.html) that includes a timestamp, log level, logger, message text and any other metadata that may be associated with the log message itself.
+With `json` layout, log messages will be formatted as JSON strings in [ECS format](asciidocalypse://docs/ecs/docs/reference/ecs/index.md) that includes a timestamp, log level, logger, message text and any other metadata that may be associated with the log message itself.

## Logger hierarchy [logger-hierarchy]

diff --git a/deploy-manage/monitor/logging-configuration/logfile-audit-events-ignore-policies.md b/deploy-manage/monitor/logging-configuration/logfile-audit-events-ignore-policies.md
index 61a41f98ba..71f2d273e8 100644
--- a/deploy-manage/monitor/logging-configuration/logfile-audit-events-ignore-policies.md
+++ b/deploy-manage/monitor/logging-configuration/logfile-audit-events-ignore-policies.md
@@ -14,7 +14,7 @@ applies:

The comprehensive audit trail is necessary to ensure accountability. It offers tremendous value during incident response and can even be required for demonstrating compliance.

-The drawback of an audited system is represented by the inevitable performance penalty incurred. In all truth, the audit trail spends *I/O ops* that are not available anymore for the user’s queries. Sometimes the verbosity of the audit trail may become a problem that the event type restrictions, [defined by `include` and `exclude`](https://www.elastic.co/guide/en/elasticsearch/reference/current/auditing-settings.html#event-audit-settings), will not alleviate.
+The drawback of an audited system is the inevitable performance penalty it incurs. In truth, the audit trail consumes *I/O ops* that are no longer available for user queries. Sometimes the verbosity of the audit trail may become a problem that the event type restrictions, [defined by `include` and `exclude`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/auding-settings.md#event-audit-settings), will not alleviate.

**Audit events ignore policies** are a finer way to tune the verbosity of the audit trail. These policies define rules that match audit events which will be *ignored* (read as: not printed). Rules match on the values of attributes of audit events and complement the `include` or `exclude` method. Imagine the corpus of audit events and the policies chopping off unwanted events. With a sole exception, all audit events are subject to the ignore policies. The exception is events of type `security_config_change`, which cannot be filtered out, unless excluded altogether.

@@ -22,7 +22,7 @@ The drawback of an audited system is represented by the inevitable performance p

When utilizing audit events ignore policies, you are acknowledging potential accountability gaps that could render illegitimate actions undetectable. Please take time to review these policies whenever your system architecture changes.
::::

-A policy is a named set of filter rules. Each filter rule applies to a single event attribute, one of the `users`, `realms`, `actions`, `roles` or `indices` attributes. The filter rule defines a list of [Lucene regexp](https://www.elastic.co/guide/en/elasticsearch/reference/current/regexp-syntax.html), **any** of which has to match the value of the audit event attribute for the rule to match. A policy matches an event if **all** the rules comprising it match the event. An audit event is ignored, therefore not printed, if it matches **any** policy. All other non-matching events are printed as usual.
+A policy is a named set of filter rules. Each filter rule applies to a single event attribute, one of the `users`, `realms`, `actions`, `roles` or `indices` attributes. The filter rule defines a list of [Lucene regexp](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/regexp-syntax.md), **any** of which has to match the value of the audit event attribute for the rule to match. A policy matches an event if **all** the rules comprising it match the event. An audit event is ignored, therefore not printed, if it matches **any** policy. All other non-matching events are printed as usual.

All policies are defined under the `xpack.security.audit.logfile.events.ignore_filters` settings namespace.
For example, the following policy named *example1* matches events from the *kibana_system* or *admin_user* principals that operate over indices of the wildcard form *app-logs**: diff --git a/deploy-manage/monitor/monitoring-data/config-monitoring-indices-metricbeat-7-internal-collection.md b/deploy-manage/monitor/monitoring-data/config-monitoring-indices-metricbeat-7-internal-collection.md index d914a95356..1f090f1f90 100644 --- a/deploy-manage/monitor/monitoring-data/config-monitoring-indices-metricbeat-7-internal-collection.md +++ b/deploy-manage/monitor/monitoring-data/config-monitoring-indices-metricbeat-7-internal-collection.md @@ -10,7 +10,7 @@ applies: # Configuring indices created by Metricbeat 7 or internal collection [config-monitoring-indices-metricbeat-7-internal-collection] -When monitoring [using {{metricbeat}} 7](../stack-monitoring/collecting-monitoring-data-with-metricbeat.md) or [internal collection](https://www.elastic.co/guide/en/beats/filebeat/current/monitoring-internal-collection.html), data is stored in a set of indices called either: +When monitoring [using {{metricbeat}} 7](../stack-monitoring/collecting-monitoring-data-with-metricbeat.md) or [internal collection](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/monitoring-internal-collection.md), data is stored in a set of indices called either: * `.monitoring-{{product}}-7-mb-{{date}}`, when using {{metricbeat}} 7. * `.monitoring-{{product}}-7-{{date}}`, when using internal collection. diff --git a/deploy-manage/monitor/monitoring-data/configure-stack-monitoring-alerts.md b/deploy-manage/monitor/monitoring-data/configure-stack-monitoring-alerts.md index 1709caff09..28872c2a0c 100644 --- a/deploy-manage/monitor/monitoring-data/configure-stack-monitoring-alerts.md +++ b/deploy-manage/monitor/monitoring-data/configure-stack-monitoring-alerts.md @@ -13,7 +13,7 @@ applies: You can configure Stack monitoring alerts to be sent to you by email when health related events occur in your deployments. To set up email notifications: 1. [Enable logging and monitoring](../stack-monitoring/elastic-cloud-stack-monitoring.md) on deployments for which you want to receive notifications. You need to enable only metrics data being shipped for the notifications to work. -2. In Kibana, configure the email connector to [send email from Elastic Cloud](https://www.elastic.co/guide/en/kibana/current/email-action-type.html#elasticcloud). If you want to use the preconfigured `Elastic-Cloud-SMTP` connector in Elastic Cloud, then you can skip this step. +2. In Kibana, configure the email connector to [send email from Elastic Cloud](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/email-action-type.md#elasticcloud). If you want to use the preconfigured `Elastic-Cloud-SMTP` connector in Elastic Cloud, then you can skip this step. 3. From the Kibana main menu, go to **Stack Monitoring**. On this page you can find a summary of monitoring metrics for your deployment as well as any alerts. 4. Select **Enter setup mode**. 5. On any card showing available alerts, select the **alerts** indicator. Use the menu to select the type of alert for which you’d like to be notified. 
There are many alert types, including: diff --git a/deploy-manage/monitor/monitoring-data/ec-memory-pressure.md b/deploy-manage/monitor/monitoring-data/ec-memory-pressure.md index 5e3d94bf0e..21b6a69d70 100644 --- a/deploy-manage/monitor/monitoring-data/ec-memory-pressure.md +++ b/deploy-manage/monitor/monitoring-data/ec-memory-pressure.md @@ -22,7 +22,7 @@ The percentage number used in the JVM memory pressure indicator is actually the When the JVM memory pressure reaches 75%, the indicator turns red. At this level, garbage collection becomes more frequent as the memory usage increases, potentially impacting the performance of your cluster. As long as the cluster performance suits your needs, JVM memory pressure above 75% is not a problem in itself, but there is not much spare memory capacity. Review the [common causes of high JVM memory usage](#ec-memory-pressure-causes) to determine your best course of action. -When the JVM memory pressure indicator rises above 95%, {{es}}'s [real memory circuit breaker](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html#parent-circuit-breaker) triggers to prevent your instance from running out of memory. This situation can reduce the stability of your cluster and the integrity of your data. Unless you expect the load to drop soon, we recommend that you resize to a larger cluster before you reach this level of memory pressure. Even if you’re planning to optimize your memory usage, it is best to resize the cluster first. Resizing the cluster to increase capacity can give you more time to apply other changes, and also provides the cluster with more resource for when those changes are applied. +When the JVM memory pressure indicator rises above 95%, {{es}}'s [real memory circuit breaker](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/circuit-breaker-settings.md#parent-circuit-breaker) triggers to prevent your instance from running out of memory. This situation can reduce the stability of your cluster and the integrity of your data. Unless you expect the load to drop soon, we recommend that you resize to a larger cluster before you reach this level of memory pressure. Even if you’re planning to optimize your memory usage, it is best to resize the cluster first. Resizing the cluster to increase capacity can give you more time to apply other changes, and also provides the cluster with more resource for when those changes are applied. ## Common causes of high JVM memory usage [ec-memory-pressure-causes] @@ -31,7 +31,7 @@ The two most common reasons for a high JVM memory pressure reading are: **1. Having too many shards per node** -If JVM memory pressure above 75% is a frequent occurrence, the cause is often having too many shards per node relative to the amount of available memory. You can lower the JVM memory pressure by reducing the number of shards or upgrading to a larger cluster. For guidelines, check [How to size your shards](https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html). +If JVM memory pressure above 75% is a frequent occurrence, the cause is often having too many shards per node relative to the amount of available memory. You can lower the JVM memory pressure by reducing the number of shards or upgrading to a larger cluster. For guidelines, check [How to size your shards](/deploy-manage/production-guidance/optimize-performance/size-shards.md). **2. 
Running expensive queries**

diff --git a/deploy-manage/monitor/monitoring-data/ec-vcpu-boost-instance.md b/deploy-manage/monitor/monitoring-data/ec-vcpu-boost-instance.md
index b72326108e..0d362031e4 100644
--- a/deploy-manage/monitor/monitoring-data/ec-vcpu-boost-instance.md
+++ b/deploy-manage/monitor/monitoring-data/ec-vcpu-boost-instance.md
@@ -15,13 +15,13 @@ Elastic Cloud allows smaller instance sizes to get temporarily boosted vCPU when

Based on the instance size, the vCPU resources assigned to your instance can be boosted to improve performance temporarily, by using vCPU credits. If credits are available, Elastic Cloud will automatically boost your instance when under heavy load. Boosting is available depending on the instance size:

-* Instance sizes up to and including 12 GB of RAM get boosted. The boosted vCPU value is `16 * vCPU ratio`, the vCPU ratios are dependent on the [hardware profile](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html#ec-getting-started-configurations) selected. If an instance is eligible for boosting, the Elastic Cloud console will display **Up to 2.5 vCPU**, depending on the hardware profile selected. The baseline, or unboosted, vCPU value is calculated as: `RAM size * vCPU ratio`.
+* Instance sizes up to and including 12 GB of RAM get boosted. The boosted vCPU value is `16 * vCPU ratio`; the vCPU ratios depend on the [hardware profile](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md#ec-getting-started-configurations) selected. If an instance is eligible for boosting, the Elastic Cloud console will display **Up to 2.5 vCPU**, depending on the hardware profile selected. The baseline, or unboosted, vCPU value is calculated as: `RAM size * vCPU ratio`.
* Instance sizes bigger than 12 GB of RAM do not get boosted. The vCPU value is displayed in the Elastic Cloud console and calculated as follows: `RAM size * vCPU ratio`.

## What are vCPU credits? [ec_what_are_vcpu_credits]

-[vCPU](https://www.elastic.co/guide/en/elastic-stack-glossary/current/terms.html#glossary-vcpu) credits enable a smaller instance to perform as if it were assigned the vCPU resources of a larger instance, but only for a limited time. vCPU credits are available only on smaller instances up to and including 8 GB of RAM.
+[vCPU](asciidocalypse://docs/docs-content/docs/reference/glossary/index.md#glossary-vcpu) credits enable a smaller instance to perform as if it were assigned the vCPU resources of a larger instance, but only for a limited time. vCPU credits are available only on smaller instances up to and including 8 GB of RAM.

vCPU credits persist through cluster restarts, but they are tied to your existing instance nodes. Operations that create new instance nodes will lose existing vCPU credits. This happens when you resize your instance, or if Elastic performs system maintenance on your nodes.

diff --git a/deploy-manage/monitor/monitoring-data/elasticsearch-metrics.md b/deploy-manage/monitor/monitoring-data/elasticsearch-metrics.md
index 14f6b1487a..c0a8741e25 100644
--- a/deploy-manage/monitor/monitoring-data/elasticsearch-metrics.md
+++ b/deploy-manage/monitor/monitoring-data/elasticsearch-metrics.md
@@ -119,5 +119,5 @@ If you use {{filebeat}} to collect log data from your cluster, you can see its r

If you click **Logs**, you can see the most recent logs for the cluster.

::::{tip}
-By default, up to 10 log entries are shown.
You can show up to 50 log entries by changing the [`monitoring.ui.elasticsearch.logFetchCount` setting](https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html#monitoring-ui-settings). If you changed the default name of filebeat indices, you also need to update `monitoring.ui.logs.index` accordingly. +By default, up to 10 log entries are shown. You can show up to 50 log entries by changing the [`monitoring.ui.elasticsearch.logFetchCount` setting](asciidocalypse://docs/kibana/docs/reference/configuration-reference/monitoring-settings.md#monitoring-ui-settings). If you changed the default name of filebeat indices, you also need to update `monitoring.ui.logs.index` accordingly. :::: diff --git a/deploy-manage/monitor/monitoring-data/logstash-page.md b/deploy-manage/monitor/monitoring-data/logstash-page.md index 4402283331..c188ac0fe3 100644 --- a/deploy-manage/monitor/monitoring-data/logstash-page.md +++ b/deploy-manage/monitor/monitoring-data/logstash-page.md @@ -24,4 +24,4 @@ If you are monitoring Logstash nodes, click **Overview** in the Logstash section 1. To view Logstash node metrics, click **Nodes**. The Nodes section shows the status of each Logstash node. 2. Click the name of a node to view its statistics over time. -For more information, refer to [Monitoring Logstash](https://www.elastic.co/guide/en/logstash/current/configuring-logstash.html). +For more information, refer to [Monitoring Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/monitoring-logstash-legacy.md). diff --git a/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md b/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md index c6576df07e..fece157672 100644 --- a/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md +++ b/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md @@ -47,7 +47,7 @@ The **Stack Monitoring** page in {{kib}} is empty. **Resolution:** -1. Confirm that {{kib}} is seeking monitoring data from the appropriate {{es}} URL. By default, data is retrieved from the cluster specified in the `elasticsearch.hosts` setting in the `kibana.yml` file. If you want to retrieve it from a different monitoring cluster, set `monitoring.ui.elasticsearch.hosts`. See [Monitoring settings](https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html). +1. Confirm that {{kib}} is seeking monitoring data from the appropriate {{es}} URL. By default, data is retrieved from the cluster specified in the `elasticsearch.hosts` setting in the `kibana.yml` file. If you want to retrieve it from a different monitoring cluster, set `monitoring.ui.elasticsearch.hosts`. See [Monitoring settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/monitoring-settings.md). 2. Confirm that there is monitoring data available at that URL. It is stored in indices such as `.monitoring-kibana-*` and `.monitoring-es-*` or `metrics-kibana.stack_monitoring.*`, depending on which method is used to collect monitoring data. At a minimum, you must have monitoring data for the {{es}} production cluster. Once that data exists, {{kib}} can display monitoring data for other products in the cluster. 3. Set the time filter to “Last 1 hour”. When monitoring data appears in your cluster, the page automatically refreshes with the monitoring summary. 4. If using {{agent}}, ensure that all integration assets have been installed in the monitoring cluster. 
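For step 1, a minimal sketch of pointing {{kib}} at a dedicated monitoring cluster in `kibana.yml` (the host and credentials are placeholders):

```yaml
monitoring.ui.elasticsearch.hosts: ["https://monitoring-cluster.example.com:9200"]
monitoring.ui.elasticsearch.username: "kibana_system"
monitoring.ui.elasticsearch.password: "<password>"
```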
diff --git a/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md b/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md index 4b45cbdf99..69311a09ed 100644 --- a/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md +++ b/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md @@ -19,5 +19,5 @@ If you enable monitoring across the {{stack}}, each monitored component is consi For more information, see [Configure monitoring](../stack-monitoring/kibana-monitoring-self-managed.md) and [Monitor a cluster](../../monitor.md). -Want to monitor your fleet of {{agent}}s, too? Use {{fleet}} instead of the Stack Monitoring UI. To learn more, refer to [Monitor {{agent}}s](https://www.elastic.co/guide/en/fleet/current/monitor-elastic-agent.html). +Want to monitor your fleet of {{agent}}s, too? Use {{fleet}} instead of the Stack Monitoring UI. To learn more, refer to [Monitor {{agent}}s](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/monitor-elastic-agent.md). diff --git a/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md b/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md index 95d1639190..e34ff577ac 100644 --- a/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md +++ b/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md @@ -26,13 +26,13 @@ If you’re using {{agent}}, do not deploy {{filebeat}} for log collection. Inst 2. Identify which logs you want to monitor. - The {{filebeat}} {{es}} module can handle [audit logs](../logging-configuration/logfile-audit-output.md), [deprecation logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md#deprecation-logging), [gc logs](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#gc-logging), [server logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md), and [slow logs](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html). For more information about the location of your {{es}} logs, see the [path.logs](../../deploy/self-managed/important-settings-configuration.md#path-settings) setting. + The {{filebeat}} {{es}} module can handle [audit logs](../logging-configuration/logfile-audit-output.md), [deprecation logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md#deprecation-logging), [gc logs](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#gc-logging), [server logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md), and [slow logs](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/slow-log-settings.md). For more information about the location of your {{es}} logs, see the [path.logs](../../deploy/self-managed/important-settings-configuration.md#path-settings) setting. ::::{important} If there are both structured (`*.json`) and unstructured (plain text) versions of the logs, you must use the structured logs. Otherwise, they might not appear in the appropriate context in {{kib}}. :::: -3. [Install {{filebeat}}](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html) on the {{es}} nodes that contain logs that you want to monitor. +3. 
[Install {{filebeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md) on the {{es}} nodes that contain logs that you want to monitor. 4. Identify where to send the log data. For example, specify {{es}} output information for your monitoring cluster in the {{filebeat}} configuration file (`filebeat.yml`): @@ -60,7 +60,7 @@ If you’re using {{agent}}, do not deploy {{filebeat}} for log collection. Inst If {{es}} {{security-features}} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{filebeat}} can send metrics successfully. - For more information about these configuration options, see [Configure the {{es}} output](https://www.elastic.co/guide/en/beats/filebeat/current/elasticsearch-output.html). + For more information about these configuration options, see [Configure the {{es}} output](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/elasticsearch-output.md). 5. Optional: Identify where to visualize the data. @@ -81,9 +81,9 @@ If you’re using {{agent}}, do not deploy {{filebeat}} for log collection. Inst If {{security-features}} are enabled, you must provide a valid user ID and password so that {{filebeat}} can connect to {{kib}}: 1. Create a user on the monitoring cluster that has the [`kibana_admin` built-in role](../../users-roles/cluster-or-deployment-auth/built-in-roles.md) or equivalent privileges. - 2. Add the `username` and `password` settings to the {{es}} output information in the {{filebeat}} configuration file. The example shows a hard-coded password, but you should store sensitive values in the [secrets keystore](https://www.elastic.co/guide/en/beats/filebeat/current/keystore.html). + 2. Add the `username` and `password` settings to the {{es}} output information in the {{filebeat}} configuration file. The example shows a hard-coded password, but you should store sensitive values in the [secrets keystore](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/keystore.md). - See [Configure the {{kib}} endpoint](https://www.elastic.co/guide/en/beats/filebeat/current/setup-kibana-endpoint.html). + See [Configure the {{kib}} endpoint](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/setup-kibana-endpoint.md). 6. Enable the {{es}} module and set up the initial {{filebeat}} environment on each node. @@ -94,7 +94,7 @@ If you’re using {{agent}}, do not deploy {{filebeat}} for log collection. Inst filebeat setup -e ``` - For more information, see [{{es}} module](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-module-elasticsearch.html). + For more information, see [{{es}} module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-module-elasticsearch.md). 7. Configure the {{es}} module in {{filebeat}} on each node. @@ -104,10 +104,10 @@ If you’re using {{agent}}, do not deploy {{filebeat}} for log collection. Inst If there are JSON logs, configure the `var.paths` settings to point to them instead of the plain text logs. :::: -8. [Start {{filebeat}}](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-starting.html) on each node. +8. [Start {{filebeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-starting.md) on each node. ::::{note} - Depending on how you’ve installed {{filebeat}}, you might see errors related to file ownership or permissions when you try to run {{filebeat}} modules. 
See [Config file ownership and permissions](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html).
+ Depending on how you’ve installed {{filebeat}}, you might see errors related to file ownership or permissions when you try to run {{filebeat}} modules. See [Config file ownership and permissions](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md).
::::

9. Check whether the appropriate indices exist on the monitoring cluster.

diff --git a/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md b/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md
index 7e609801fa..7b13484625 100644
--- a/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md
+++ b/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md
@@ -19,7 +19,7 @@ Want to use {{agent}} instead? Refer to [Collecting monitoring data with {{agent
:alt: Example monitoring architecture
:::

-1. [Install {{metricbeat}}](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-installation-configuration.html). Ideally install a single {{metricbeat}} instance configured with `scope: cluster` and configure `hosts` to point to an endpoint (e.g. a load-balancing proxy) which directs requests to the master-ineligible nodes in the cluster. If this is not possible then install one {{metricbeat}} instance for each {{es}} node in the production cluster and use the default `scope: node`. When {{metricbeat}} is monitoring {{es}} with `scope: node` then you must install a {{metricbeat}} instance for each {{es}} node. If you don’t, some metrics will not be collected. {{metricbeat}} with `scope: node` collects most of the metrics from the elected master of the cluster, so you must scale up all your master-eligible nodes to account for this extra load and you should not use this mode if you have dedicated master nodes.
+1. [Install {{metricbeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-installation-configuration.md). Ideally, install a single {{metricbeat}} instance configured with `scope: cluster` and configure `hosts` to point to an endpoint (e.g. a load-balancing proxy) that directs requests to the master-ineligible nodes in the cluster. If this is not possible, install one {{metricbeat}} instance for each {{es}} node in the production cluster and use the default `scope: node`. When {{metricbeat}} is monitoring {{es}} with `scope: node`, you must install a {{metricbeat}} instance for each {{es}} node. If you don’t, some metrics will not be collected. {{metricbeat}} with `scope: node` collects most of the metrics from the elected master of the cluster, so you must scale up all your master-eligible nodes to account for this extra load, and you should not use this mode if you have dedicated master nodes.

2. Enable the {{es}} module in {{metricbeat}} on each {{es}} node.

   For example, to enable the default configuration for the {{stack-monitor-features}} in the `modules.d` directory, run the following command:

   ```sh
   metricbeat modules enable elasticsearch-xpack
   ```

- For more information, refer to [{{es}} module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-elasticsearch.html).
+ For more information, refer to [{{es}} module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-module-elasticsearch.md). 3. Configure the {{es}} module in {{metricbeat}} on each {{es}} node. @@ -57,11 +57,11 @@ Want to use {{agent}} instead? Refer to [Collecting monitoring data with {{agent 1. Create a user on the production cluster that has the [`remote_monitoring_collector` built-in role](../../users-roles/cluster-or-deployment-auth/built-in-roles.md). Alternatively, use the [`remote_monitoring_user` built-in user](../../users-roles/cluster-or-deployment-auth/built-in-users.md). 2. Add the `username` and `password` settings to the {{es}} module configuration file. - 3. If TLS is enabled on the HTTP layer of your {{es}} cluster, you must either use https as the URL scheme in the `hosts` setting or add the `ssl.enabled: true` setting. Depending on the TLS configuration of your {{es}} cluster, you might also need to specify [additional ssl.*](https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-ssl.html) settings. + 3. If TLS is enabled on the HTTP layer of your {{es}} cluster, you must either use https as the URL scheme in the `hosts` setting or add the `ssl.enabled: true` setting. Depending on the TLS configuration of your {{es}} cluster, you might also need to specify [additional ssl.*](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configuration-ssl.md) settings. 4. Optional: Disable the system module in {{metricbeat}}. - By default, the [system module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-system.html) is enabled. The information it collects, however, is not shown on the **Monitoring** page in {{kib}}. Unless you want to use that information for other purposes, run the following command: + By default, the [system module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-module-system.md) is enabled. The information it collects, however, is not shown on the **Monitoring** page in {{kib}}. Unless you want to use that information for other purposes, run the following command: ```sh metricbeat modules disable system @@ -102,8 +102,8 @@ Want to use {{agent}} instead? Refer to [Collecting monitoring data with {{agent 1. Create a user on the monitoring cluster that has the [`remote_monitoring_agent` built-in role](../../users-roles/cluster-or-deployment-auth/built-in-roles.md). Alternatively, use the [`remote_monitoring_user` built-in user](../../users-roles/cluster-or-deployment-auth/built-in-users.md). 2. Add the `username` and `password` settings to the {{es}} output information in the {{metricbeat}} configuration file. - For more information about these configuration options, see [Configure the {{es}} output](https://www.elastic.co/guide/en/beats/metricbeat/current/elasticsearch-output.html). + For more information about these configuration options, see [Configure the {{es}} output](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/elasticsearch-output.md). -6. [Start {{metricbeat}}](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-starting.html) on each node. +6. [Start {{metricbeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-starting.md) on each node. 7. [View the monitoring data in {{kib}}](kibana-monitoring-data.md). 
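Taken together, a sketch of the resulting {{es}} module configuration in `modules.d/elasticsearch-xpack.yml` (the host and credentials are placeholders):

```yaml
- module: elasticsearch
  xpack.enabled: true
  period: 10s
  # With the default scope, point at the local node only.
  scope: node
  hosts: ["http://localhost:9200"]
  username: "remote_monitoring_user"
  password: "<password>"
```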
diff --git a/deploy-manage/monitor/stack-monitoring/ece-stack-monitoring.md b/deploy-manage/monitor/stack-monitoring/ece-stack-monitoring.md
index 4f1be782c0..1da4335e25 100644
--- a/deploy-manage/monitor/stack-monitoring/ece-stack-monitoring.md
+++ b/deploy-manage/monitor/stack-monitoring/ece-stack-monitoring.md
@@ -181,7 +181,7 @@ When shipping logs to a monitoring deployment there are more logging features av

#### For {{es}}: [ece-extra-logging-features-elasticsearch]

* [Audit logging](../logging-configuration/enabling-audit-logs.md) - logs security-related events on your deployment
-* [Slow query and index logging](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html) - helps find and debug slow queries and indexing
+* [Slow query and index logging](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/slow-log-settings.md) - helps find and debug slow queries and indexing
* Verbose logging - helps debug stack issues by increasing component logs

After you’ve enabled log delivery on your deployment, you can [add the Elasticsearch user settings](../../deploy/cloud-enterprise/edit-stack-settings.md) to enable these features.

diff --git a/deploy-manage/monitor/stack-monitoring/eck-stack-monitoring.md b/deploy-manage/monitor/stack-monitoring/eck-stack-monitoring.md
index 5bbeafacd1..1032c2c7ec 100644
--- a/deploy-manage/monitor/stack-monitoring/eck-stack-monitoring.md
+++ b/deploy-manage/monitor/stack-monitoring/eck-stack-monitoring.md
@@ -8,7 +8,7 @@ applies:

# Enable stack monitoring on ECK deployments [k8s-stack-monitoring]

-You can enable [Stack Monitoring](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html) on Elasticsearch, Kibana, Beats and Logstash to collect and ship their metrics and logs to a monitoring cluster. Although self-monitoring is possible, it is advised to use a [separate monitoring cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-overview.html).
+You can enable [Stack Monitoring](/deploy-manage/monitor.md) on Elasticsearch, Kibana, Beats and Logstash to collect and ship their metrics and logs to a monitoring cluster. Although self-monitoring is possible, we recommend using a [separate monitoring cluster](/deploy-manage/monitor/stack-monitoring.md).

To enable Stack Monitoring, simply reference the monitoring Elasticsearch cluster in the `spec.monitoring` section of their specification.

@@ -80,7 +80,7 @@ If Logs Stack Monitoring is configured for a Beat, and custom container argument

::::

-You can also enable Stack Monitoring on a single Stack component only. In case Elasticsearch is not monitored, other Stack components will not be available on the Stack Monitoring Kibana page (check [View monitoring data in Kibana](https://www.elastic.co/guide/en/kibana/current/monitoring-data.html#monitoring-data)).
+You can also enable Stack Monitoring on a single Stack component only. If Elasticsearch is not monitored, other Stack components will not be available on the Stack Monitoring Kibana page (see [View monitoring data in Kibana](/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md)).
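For example, a sketch of an Elasticsearch resource that ships both metrics and logs to a monitoring cluster in the same namespace (the names and version are illustrative):

```yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: monitored-sample
spec:
  version: 8.16.1
  monitoring:
    metrics:
      elasticsearchRefs:
      - name: monitoring
    logs:
      elasticsearchRefs:
      - name: monitoring
  nodeSets:
  - name: default
    count: 3
```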
diff --git a/deploy-manage/monitor/stack-monitoring/es-http-exporter.md b/deploy-manage/monitor/stack-monitoring/es-http-exporter.md index 6bf7dd4a8c..dc7f43a18e 100644 --- a/deploy-manage/monitor/stack-monitoring/es-http-exporter.md +++ b/deploy-manage/monitor/stack-monitoring/es-http-exporter.md @@ -17,9 +17,9 @@ If you have previously configured legacy collection methods, you should migrate The `http` exporter is the preferred exporter in the {{es}} {{monitor-features}} because it enables the use of a separate monitoring cluster. As a secondary benefit, it avoids using a production cluster node as a coordinating node for indexing monitoring data because all requests are HTTP requests to the monitoring cluster. -The `http` exporter uses the low-level {{es}} REST Client, which enables it to send its data to any {{es}} cluster it can access through the network. Its requests make use of the [`filter_path`](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#common-options-response-filtering) parameter to reduce bandwidth whenever possible, which helps to ensure that communications between the production and monitoring clusters are as lightweight as possible. +The `http` exporter uses the low-level {{es}} REST Client, which enables it to send its data to any {{es}} cluster it can access through the network. Its requests make use of the [`filter_path`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/common-options.md#common-options-response-filtering) parameter to reduce bandwidth whenever possible, which helps to ensure that communications between the production and monitoring clusters are as lightweight as possible. -The `http` exporter supports a number of settings that control how it communicates over HTTP to remote clusters. In most cases, it is not necessary to explicitly configure these settings. For detailed descriptions, see [Monitoring settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html). +The `http` exporter supports a number of settings that control how it communicates over HTTP to remote clusters. In most cases, it is not necessary to explicitly configure these settings. For detailed descriptions, see [Monitoring settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md). ```yaml xpack.monitoring.exporters: diff --git a/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md b/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md index 4c5f954263..d4c077b0fa 100644 --- a/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md +++ b/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md @@ -33,7 +33,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). :::: - For more information, see [Monitoring settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). + For more information, see [Monitoring settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). 2. Set the `xpack.monitoring.collection.enabled` setting to `true` on each node in the cluster. 
By default, it is disabled (`false`). @@ -146,7 +146,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). 5. If you updated settings in the `elasticsearch.yml` files on your production cluster, restart {{es}}. See [*Stopping Elasticsearch*](../../maintenance/start-stop-services/start-stop-elasticsearch.md) and [*Starting Elasticsearch*](../../maintenance/start-stop-services/start-stop-elasticsearch.md). ::::{tip} - You may want to temporarily [disable shard allocation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html) before you restart your nodes to avoid unnecessary shard reallocation during the install process. + You may want to temporarily [disable shard allocation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md) before you restart your nodes to avoid unnecessary shard reallocation during the install process. :::: 6. Optional: [Configure the indices that store the monitoring data](../monitoring-data/configuring-data-streamsindices-for-monitoring.md). diff --git a/deploy-manage/monitor/stack-monitoring/es-local-exporter.md b/deploy-manage/monitor/stack-monitoring/es-local-exporter.md index 2e4c116b93..abaf5b96fe 100644 --- a/deploy-manage/monitor/stack-monitoring/es-local-exporter.md +++ b/deploy-manage/monitor/stack-monitoring/es-local-exporter.md @@ -38,7 +38,7 @@ The elected master node is the only node to set up resources for the `local` exp One benefit of the `local` exporter is that it lives within the cluster and therefore no extra configuration is required when the cluster is secured with {{stack}} {{security-features}}. All operations, including indexing operations, that occur from a `local` exporter make use of the internal transport mechanisms within {{es}}. This behavior enables the exporter to be used without providing any user credentials when {{security-features}} are enabled. -For more information about the configuration options for the `local` exporter, see [Local exporter settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#local-exporter-settings). +For more information about the configuration options for the `local` exporter, see [Local exporter settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md#local-exporter-settings). ## Cleaner service [local-exporter-cleaner] diff --git a/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md b/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md index a37d192689..90e7b9cd63 100644 --- a/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md +++ b/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md @@ -43,7 +43,7 @@ Collection is currently done serially, rather than in parallel, to avoid extra o :::: -For more information about the configuration options for the collectors, see [Monitoring collection settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#monitoring-collection-settings). +For more information about the configuration options for the collectors, see [Monitoring collection settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md#monitoring-collection-settings). 
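As an illustration of the collector settings discussed here, enabling collection in `elasticsearch.yml` might look like the following sketch; the interval shown is the assumed default, and both settings can also be changed dynamically through the cluster update settings API:

```yaml
xpack.monitoring.collection.enabled: true   # collectors are disabled by default
xpack.monitoring.collection.interval: 10s   # assumed default collection interval
```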
## Collecting data from across the Elastic Stack [es-monitoring-stack] diff --git a/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md b/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md index 6c4ace2774..f22994cba1 100644 --- a/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md +++ b/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md @@ -39,11 +39,11 @@ When the exporters route monitoring data into the monitoring cluster, they use ` Routing monitoring data involves indexing it into the appropriate monitoring indices. Once the data is indexed, it exists in a monitoring index that, by default, is named with a daily index pattern. For {{es}} monitoring data, this is an index that matches `.monitoring-es-6-*`. From there, the data lives inside the monitoring cluster and must be curated or cleaned up as necessary. If you do not curate the monitoring data, it eventually fills up the nodes and the cluster might fail due to lack of disk space. ::::{tip} -You are strongly recommended to manage the curation of indices and particularly the monitoring indices. To do so, you can take advantage of the [cleaner service](es-local-exporter.md#local-exporter-cleaner) or [Elastic Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html). +We strongly recommend that you manage the curation of indices, particularly the monitoring indices. To do so, you can take advantage of the [cleaner service](es-local-exporter.md#local-exporter-cleaner) or [Elastic Curator](asciidocalypse://docs/curator/docs/reference/elasticsearch/elasticsearch-client-curator/index.md). :::: -There is also a disk watermark (known as the flood stage watermark), which protects clusters from running out of disk space. When this feature is triggered, it makes all indices (including monitoring indices) read-only until the issue is fixed and a user manually makes the index writeable again. While an active monitoring index is read-only, it will naturally fail to write (index) new data and will continuously log errors that indicate the write failure. For more information, see [Disk-based shard allocation settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#disk-based-shard-allocation). +There is also a disk watermark (known as the flood stage watermark), which protects clusters from running out of disk space. When this feature is triggered, it makes all indices (including monitoring indices) read-only until the issue is fixed and a user manually makes the index writeable again. While an active monitoring index is read-only, it will naturally fail to write (index) new data and will continuously log errors that indicate the write failure. For more information, see [Disk-based shard allocation settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md#disk-based-shard-allocation). ## Default exporters [es-monitoring-default-exporter] @@ -75,7 +75,7 @@ Before exporters can route monitoring data, they must set up certain {{es}} reso The templates are ordinary {{es}} templates that control the default settings and mappings for the monitoring indices. -By default, monitoring indices are created daily (for example, `.monitoring-es-6-2017.08.26`). You can change the default date suffix for monitoring indices with the `index.name.time_format` setting.
You can use this setting to control how frequently monitoring indices are created by a specific `http` exporter. You cannot use this setting with `local` exporters. For more information, see [HTTP exporter settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#http-exporter-settings). +By default, monitoring indices are created daily (for example, `.monitoring-es-6-2017.08.26`). You can change the default date suffix for monitoring indices with the `index.name.time_format` setting. You can use this setting to control how frequently monitoring indices are created by a specific `http` exporter. You cannot use this setting with `local` exporters. For more information, see [HTTP exporter settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md#http-exporter-settings). ::::{warning} Some users create their own templates that match *all* index patterns, which therefore impact the monitoring indices that get created. It is critical that you do not disable `_source` storage for the monitoring indices. If you do, {{kib}} {{monitor-features}} do not work and you cannot visualize monitoring data for your cluster. diff --git a/deploy-manage/monitor/stack-monitoring/k8s_when_to_use_it.md b/deploy-manage/monitor/stack-monitoring/k8s_when_to_use_it.md index 5f67296ac1..01d44a46bb 100644 --- a/deploy-manage/monitor/stack-monitoring/k8s_when_to_use_it.md +++ b/deploy-manage/monitor/stack-monitoring/k8s_when_to_use_it.md @@ -12,5 +12,5 @@ This feature is a good solution if you need to monitor your Elastic applications * to Metricbeat to allow querying the k8s API * to Filebeat to deploy a privileged DaemonSet -However, for maximum efficiency and minimising resource consumption, or advanced use cases that require specific Beats configurations, you can deploy a standalone Metricbeat Deployment and a Filebeat Daemonset. Check the [Beats configuration Examples](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-beat-configuration-examples.html) for more information. +However, for maximum efficiency and minimal resource consumption, or for advanced use cases that require specific Beats configurations, you can deploy a standalone Metricbeat Deployment and a Filebeat DaemonSet. See the [Beats configuration examples](/deploy-manage/deploy/cloud-on-k8s/configuration-examples-beats.md) for more information. diff --git a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md index c8761eb358..66eb07cd04 100644 --- a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md +++ b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md @@ -32,7 +32,7 @@ If you use a separate monitoring cluster to store the monitoring data, it is str To learn more about typical monitoring architectures, see [How monitoring works](../stack-monitoring.md) and [Monitoring in a production environment](elasticsearch-monitoring-self-managed.md). -2. Verify that `monitoring.ui.enabled` is set to `true`, which is the default value, in the `kibana.yml` file. For more information, see [Monitoring settings](https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html). +2. Verify that `monitoring.ui.enabled` is set to `true`, which is the default value, in the `kibana.yml` file. For more information, see [Monitoring settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/monitoring-settings.md). 3.
If the Elastic {{security-features}} are enabled on the monitoring cluster, you must provide a user ID and password so {{kib}} can retrieve the data. 1. Create a user that has the `monitoring_user` [built-in role](../../users-roles/cluster-or-deployment-auth/built-in-roles.md) on the monitoring cluster. diff --git a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-legacy.md b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-legacy.md index 07c3755690..3aebe99ff3 100644 --- a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-legacy.md +++ b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-legacy.md @@ -59,9 +59,9 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). } ``` - For more information, see [Monitoring settings in {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). + For more information, see [Monitoring settings in {{es}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). -2. Verify that `monitoring.enabled` and `monitoring.kibana.collection.enabled` are set to `true` in the `kibana.yml` file. These are the default values. For more information, see [Monitoring settings in {{kib}}](https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html). +2. Verify that `monitoring.enabled` and `monitoring.kibana.collection.enabled` are set to `true` in the `kibana.yml` file. These are the default values. For more information, see [Monitoring settings in {{kib}}](asciidocalypse://docs/kibana/docs/reference/configuration-reference/monitoring-settings.md). 3. Identify where to send monitoring data. {{kib}} automatically sends metrics to the {{es}} cluster specified in the `elasticsearch.hosts` setting in the `kibana.yml` file. This property has a default value of `http://localhost:9200`.
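Putting the settings from these steps together, a minimal `kibana.yml` sketch for legacy collection might look like the following; the host value is the default mentioned above:

```yaml
monitoring.enabled: true                        # default
monitoring.kibana.collection.enabled: true      # default
elasticsearch.hosts: ["http://localhost:9200"]  # where {{kib}} sends its metrics
```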
::::{tip} diff --git a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md index f5aab0743d..37b3e6645a 100644 --- a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md +++ b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md @@ -27,7 +27,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). monitoring.kibana.collection.enabled: false ``` - Leave the `monitoring.enabled` set to its default value (`true`). For more information, see [Monitoring settings in {{kib}}](https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html). + Leave `monitoring.enabled` set to its default value (`true`). For more information, see [Monitoring settings in {{kib}}](asciidocalypse://docs/kibana/docs/reference/configuration-reference/monitoring-settings.md). 2. [Start {{kib}}](../../maintenance/start-stop-services/start-stop-kibana.md). 3. Set the `xpack.monitoring.collection.enabled` setting to `true` on each node in the production cluster. By default, it is disabled (`false`). @@ -62,9 +62,9 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). } ``` - For more information, see [Monitoring settings in {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). + For more information, see [Monitoring settings in {{es}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). -4. [Install {{metricbeat}}](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-installation-configuration.html) on the same server as {{kib}}. +4. [Install {{metricbeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-installation-configuration.md) on the same server as {{kib}}. 5. Enable the {{kib}} {{xpack}} module in {{metricbeat}}.
For example, to enable the default configuration in the `modules.d` directory, run the following command: @@ -73,7 +73,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). metricbeat modules enable kibana-xpack ``` - For more information, see [Specify which modules to run](https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-metricbeat.html) and [{{kib}} module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-kibana.html). + For more information, see [Specify which modules to run](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configuration-metricbeat.md) and [{{kib}} module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-module-kibana.md). 6. Configure the {{kib}} {{xpack}} module in {{metricbeat}}.
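The module configuration for step 6 sits outside this hunk. As a sketch only, a minimal `modules.d/kibana-xpack.yml` might look like this; the host and the commented-out credentials are placeholders:

```yaml
- module: kibana
  xpack.enabled: true
  period: 10s
  hosts: ["http://localhost:5601"]   # placeholder {{kib}} endpoint
  #username: "remote_monitoring_user"
  #password: "your_password"
```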
@@ -100,7 +100,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). 7. Optional: Disable the system module in {{metricbeat}}. - By default, the [system module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-system.html) is enabled. The information it collects, however, is not shown on the **Monitoring** page in {{kib}}. Unless you want to use that information for other purposes, run the following command: + By default, the [system module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-module-system.md) is enabled. The information it collects, however, is not shown on the **Monitoring** page in {{kib}}. Unless you want to use that information for other purposes, run the following command: ```sh metricbeat modules disable system @@ -141,8 +141,8 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). 1. Create a user on the monitoring cluster that has the `remote_monitoring_agent` [built-in role](../../users-roles/cluster-or-deployment-auth/built-in-roles.md). Alternatively, use the `remote_monitoring_user` [built-in user](../../users-roles/cluster-or-deployment-auth/built-in-users.md). 2. Add the `username` and `password` settings to the {{es}} output information in the {{metricbeat}} configuration file. - For more information about these configuration options, see [Configure the {{es}} output](https://www.elastic.co/guide/en/beats/metricbeat/current/elasticsearch-output.html). + For more information about these configuration options, see [Configure the {{es}} output](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/elasticsearch-output.md). -9. [Start {{metricbeat}}](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-starting.html). +9. [Start {{metricbeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-starting.md). 10. [View the monitoring data in {{kib}}](/deploy-manage/monitor/monitoring-data.md). diff --git a/deploy-manage/production-guidance.md b/deploy-manage/production-guidance.md index cdee264506..27e37ca0de 100644 --- a/deploy-manage/production-guidance.md +++ b/deploy-manage/production-guidance.md @@ -10,17 +10,17 @@ This section provides some best practices for managing your data to help you set ## Plan your data structure, availability, and formatting [ec_plan_your_data_structure_availability_and_formatting] -* Build a [data architecture](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html) that best fits your needs. Your Elasticsearch Service deployment comes with default hot tier {{es}} nodes that store your most frequently accessed data. Based on your own access and retention policies, you can add warm, cold, frozen data tiers, and automated deletion of old data. -* Make your data [highly available](https://www.elastic.co/guide/en/elasticsearch/reference/current/high-availability.html) for production environments or otherwise critical data stores, and take regular [backup snapshots](tools/snapshot-and-restore.md). -* Normalize event data to better analyze, visualize, and correlate your events by adopting the [Elastic Common Schema](https://www.elastic.co/guide/en/ecs/current/ecs-getting-started.html) (ECS). Elastic integrations use ECS out-of-the-box. If you are writing your own integrations, ECS is recommended. +* Build a [data architecture](/manage-data/lifecycle/data-tiers.md) that best fits your needs. 
Your Elasticsearch Service deployment comes with default hot tier {{es}} nodes that store your most frequently accessed data. Based on your own access and retention policies, you can add warm, cold, frozen data tiers, and automated deletion of old data. +* Make your data [highly available](/deploy-manage/tools.md) for production environments or otherwise critical data stores, and take regular [backup snapshots](tools/snapshot-and-restore.md). +* Normalize event data to better analyze, visualize, and correlate your events by adopting the [Elastic Common Schema](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-getting-started.md) (ECS). Elastic integrations use ECS out of the box. If you are writing your own integrations, ECS is recommended. ## Optimize data storage and retention [ec_optimize_data_storage_and_retention] -Once you have your data tiers deployed and you have data flowing, you can [manage the index lifecycle](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-lifecycle-management.html). +Once you have your data tiers deployed and you have data flowing, you can [manage the index lifecycle](/manage-data/lifecycle/index-lifecycle-management.md). ::::{tip} -[Elastic integrations](https://www.elastic.co/integrations) provide default index lifecycle policies, and you can [build your own policies for your custom integrations](https://www.elastic.co/guide/en/elasticsearch/reference/current/getting-started-index-lifecycle-management.html). +[Elastic integrations](https://www.elastic.co/integrations) provide default index lifecycle policies, and you can [build your own policies for your custom integrations](/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md). :::: diff --git a/deploy-manage/production-guidance/availability-and-resilience.md b/deploy-manage/production-guidance/availability-and-resilience.md index b4ce8f1bfa..278b17e1d9 100644 --- a/deploy-manage/production-guidance/availability-and-resilience.md +++ b/deploy-manage/production-guidance/availability-and-resilience.md @@ -10,7 +10,7 @@ Distributed systems like {{es}} are designed to keep working even if some of the There is a limit to how small a resilient cluster can be. All {{es}} clusters require the following components to function: * One [elected master node](../distributed-architecture/discovery-cluster-formation/modules-discovery-quorums.md) -* At least one node for each [role](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) +* At least one node for each [role](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md) * At least one copy of every [shard](../../deploy-manage/index.md) A resilient cluster requires redundancy for every required cluster component. This means a resilient cluster must have the following components: diff --git a/deploy-manage/production-guidance/availability-and-resilience/resilience-in-larger-clusters.md b/deploy-manage/production-guidance/availability-and-resilience/resilience-in-larger-clusters.md index b212d0e942..529d3f5dd5 100644 --- a/deploy-manage/production-guidance/availability-and-resilience/resilience-in-larger-clusters.md +++ b/deploy-manage/production-guidance/availability-and-resilience/resilience-in-larger-clusters.md @@ -9,7 +9,7 @@ It’s not unusual for nodes to share common infrastructure, such as network int {{es}} expects node-to-node connections to be reliable, have low latency, and have adequate bandwidth.
Many {{es}} tasks require multiple round-trips between nodes. A slow or unreliable interconnect may have a significant effect on the performance and stability of your cluster. -For example, a few milliseconds of latency added to each round-trip can quickly accumulate into a noticeable performance penalty. An unreliable network may have frequent network partitions. {{es}} will automatically recover from a network partition as quickly as it can but your cluster may be partly unavailable during a partition and will need to spend time and resources to [resynchronize any missing data](../../distributed-architecture/shard-allocation-relocation-recovery.md#shard-recovery) and [rebalance](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#shards-rebalancing-settings) itself once the partition heals. Recovering from a failure may involve copying a large amount of data between nodes so the recovery time is often determined by the available bandwidth. +For example, a few milliseconds of latency added to each round-trip can quickly accumulate into a noticeable performance penalty. An unreliable network may have frequent network partitions. {{es}} will automatically recover from a network partition as quickly as it can, but your cluster may be partly unavailable during a partition and will need to spend time and resources to [resynchronize any missing data](../../distributed-architecture/shard-allocation-relocation-recovery.md#shard-recovery) and [rebalance](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md#shards-rebalancing-settings) itself once the partition heals. Recovering from a failure may involve copying a large amount of data between nodes, so the recovery time is often determined by the available bandwidth. If you’ve divided your cluster into zones, the network connections within each zone are typically of higher quality than the connections between the zones. Ensure the network connections between zones are of sufficiently high quality. You will see the best results by locating all your zones within a single data center with each zone having its own independent power supply and other supporting infrastructure. You can also *stretch* your cluster across nearby data centers as long as the network interconnection between each pair of data centers is good enough. diff --git a/deploy-manage/production-guidance/availability-and-resilience/resilience-in-small-clusters.md b/deploy-manage/production-guidance/availability-and-resilience/resilience-in-small-clusters.md index 337e17ff2b..eee26f6c39 100644 --- a/deploy-manage/production-guidance/availability-and-resilience/resilience-in-small-clusters.md +++ b/deploy-manage/production-guidance/availability-and-resilience/resilience-in-small-clusters.md @@ -11,7 +11,7 @@ In smaller clusters, it is most important to be resilient to single-node failure If your cluster consists of one node, that single node must do everything. To accommodate this, {{es}} assigns nodes every role by default. -A single node cluster is not resilient. If the node fails, the cluster will stop working. Because there are no replicas in a one-node cluster, you cannot store your data redundantly. However, by default at least one replica is required for a [`green` cluster health status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health).
To ensure your cluster can report a `green` status, override the default by setting [`index.number_of_replicas`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-settings) to `0` on every index. +A single node cluster is not resilient. If the node fails, the cluster will stop working. Because there are no replicas in a one-node cluster, you cannot store your data redundantly. However, by default at least one replica is required for a [`green` cluster health status](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health). To ensure your cluster can report a `green` status, override the default by setting [`index.number_of_replicas`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#dynamic-index-settings) to `0` on every index. If the node fails, you may need to restore an older copy of any lost indices from a [snapshot](../../tools/snapshot-and-restore.md). diff --git a/deploy-manage/production-guidance/general-recommendations.md b/deploy-manage/production-guidance/general-recommendations.md index c29f420e56..d5f148aa5e 100644 --- a/deploy-manage/production-guidance/general-recommendations.md +++ b/deploy-manage/production-guidance/general-recommendations.md @@ -8,14 +8,14 @@ mapped_pages: ## Don’t return large result sets [large-size] -Elasticsearch is designed as a search engine, which makes it very good at getting back the top documents that match a query. However, it is not as good for workloads that fall into the database domain, such as retrieving all documents that match a particular query. If you need to do this, make sure to use the [Scroll](https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results) API. +Elasticsearch is designed as a search engine, which makes it very good at getting back the top documents that match a query. However, it is not as good for workloads that fall into the database domain, such as retrieving all documents that match a particular query. If you need to do this, make sure to use the [Scroll](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/paginate-search-results.md#scroll-search-results) API. ## Avoid large documents [maximum-document-size] -Given that the default [`http.max_content_length`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#http-settings) is set to 100MB, Elasticsearch will refuse to index any document that is larger than that. You might decide to increase that particular setting, but Lucene still has a limit of about 2GB. +Given that the default [`http.max_content_length`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-settings) is set to 100MB, Elasticsearch will refuse to index any document that is larger than that. You might decide to increase that particular setting, but Lucene still has a limit of about 2GB. -Even without considering hard limits, large documents are usually not practical. Large documents put more stress on network, memory usage and disk, even for search requests that do not request the `_source` since Elasticsearch needs to fetch the `_id` of the document in all cases, and the cost of getting this field is bigger for large documents due to how the filesystem cache works. Indexing this document can use an amount of memory that is a multiplier of the original size of the document. 
Proximity search (phrase queries for instance) and [highlighting](https://www.elastic.co/guide/en/elasticsearch/reference/current/highlighting.html) also become more expensive since their cost directly depends on the size of the original document. +Even without considering hard limits, large documents are usually not practical. Large documents put more stress on the network, memory usage, and disk, even for search requests that do not request the `_source` since Elasticsearch needs to fetch the `_id` of the document in all cases, and the cost of getting this field is bigger for large documents due to how the filesystem cache works. Indexing such a document can use an amount of memory that is a multiplier of the original size of the document. Proximity search (phrase queries for instance) and [highlighting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/highlighting.md) also become more expensive since their cost directly depends on the size of the original document. It is sometimes useful to reconsider what the unit of information should be. For instance, the fact that you want to make books searchable doesn’t necessarily mean that a document should consist of a whole book. It might be a better idea to use chapters or even paragraphs as documents, and then have a property in these documents that identifies which book they belong to. This not only avoids the issues with large documents, it also makes the search experience better. For instance, if a user searches for two words `foo` and `bar`, a match across different chapters is probably very poor, while a match within the same paragraph is likely good. diff --git a/deploy-manage/production-guidance/kibana-alerting-production-considerations.md b/deploy-manage/production-guidance/kibana-alerting-production-considerations.md index e79aa5f82d..bece9d8e8a 100644 --- a/deploy-manage/production-guidance/kibana-alerting-production-considerations.md +++ b/deploy-manage/production-guidance/kibana-alerting-production-considerations.md @@ -27,7 +27,7 @@ For more details on Task Manager, see [Running background tasks](../distributed- ::::{important} Rule and action tasks can run late or at an inconsistent schedule. This is typically a symptom of the specific usage of the cluster in question. -You can address such issues by tweaking the [Task Manager settings](https://www.elastic.co/guide/en/kibana/current/task-manager-settings-kb.html#task-manager-settings) or scaling the deployment to better suit your use case. +You can address such issues by tweaking the [Task Manager settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/task-manager-settings.md#task-manager-settings) or scaling the deployment to better suit your use case. For detailed guidance, see [Alerting Troubleshooting](../../explore-analyze/alerts-cases/alerts/alerting-troubleshooting.md). @@ -63,7 +63,7 @@ For more information on data stream lifecycle management, see: [Data stream life ## Circuit breakers [alerting-circuit-breakers] -There are several scenarios where running alerting rules and actions can start to negatively impact the overall health of a {{kib}} instance either by clogging up Task Manager throughput or by consuming so much CPU/memory that other operations cannot complete in a reasonable amount of time. There are several [configurable](https://www.elastic.co/guide/en/kibana/current/alert-action-settings-kb.html#alert-settings) circuit breakers to help minimize these effects.
+There are several scenarios where running alerting rules and actions can start to negatively impact the overall health of a {{kib}} instance either by clogging up Task Manager throughput or by consuming so much CPU/memory that other operations cannot complete in a reasonable amount of time. There are several [configurable](asciidocalypse://docs/kibana/docs/reference/configuration-reference/alerting-settings.md#alert-settings) circuit breakers to help minimize these effects. ### Rules with very short intervals [_rules_with_very_short_intervals] diff --git a/deploy-manage/production-guidance/kibana-task-manager-scaling-considerations.md b/deploy-manage/production-guidance/kibana-task-manager-scaling-considerations.md index f933b065b7..38ae485b93 100644 --- a/deploy-manage/production-guidance/kibana-task-manager-scaling-considerations.md +++ b/deploy-manage/production-guidance/kibana-task-manager-scaling-considerations.md @@ -26,7 +26,7 @@ If you lose this index, all scheduled alerts and actions are lost. {{kib}} background tasks are managed as follows: -* An {{es}} task index is polled for overdue tasks at 3-second intervals. You can change this interval using the [`xpack.task_manager.poll_interval`](https://www.elastic.co/guide/en/kibana/current/task-manager-settings-kb.html#task-manager-settings) setting. +* An {{es}} task index is polled for overdue tasks at 3-second intervals. You can change this interval using the [`xpack.task_manager.poll_interval`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/task-manager-settings.md#task-manager-settings) setting. * Tasks are claimed by updating them in the {{es}} index, using optimistic concurrency control to prevent conflicts. Each {{kib}} instance can run a maximum of 10 concurrent tasks, so a maximum of 10 tasks are claimed each interval. * Tasks are run on the {{kib}} server. * Task Manager ensures that tasks: diff --git a/deploy-manage/production-guidance/optimize-performance/approximate-knn-search.md b/deploy-manage/production-guidance/optimize-performance/approximate-knn-search.md index 94d308429a..daa688a29d 100644 --- a/deploy-manage/production-guidance/optimize-performance/approximate-knn-search.md +++ b/deploy-manage/production-guidance/optimize-performance/approximate-knn-search.md @@ -12,7 +12,7 @@ Many of these recommendations help improve search speed. With approximate kNN, t ## Reduce vector memory foot-print [_reduce_vector_memory_foot_print] -The default [`element_type`](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-element-type) is `float`. But this can be automatically quantized during index time through [`quantization`](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-quantization). Quantization will reduce the required memory by 4x, 8x, or as much as 32x, but it will also reduce the precision of the vectors and increase disk usage for the field (by up to 25%, 12.5%, or 3.125%, respectively). Increased disk usage is a result of {{es}} storing both the quantized and the unquantized vectors. For example, when int8 quantizing 40GB of floating point vectors an extra 10GB of data will be stored for the quantized vectors. The total disk usage amounts to 50GB, but the memory usage for fast search will be reduced to 10GB. +The default [`element_type`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md#dense-vector-element-type) is `float`. 
But this can be automatically quantized during index time through [`quantization`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md#dense-vector-quantization). Quantization will reduce the required memory by 4x, 8x, or as much as 32x, but it will also reduce the precision of the vectors and increase disk usage for the field (by up to 25%, 12.5%, or 3.125%, respectively). Increased disk usage is a result of {{es}} storing both the quantized and the unquantized vectors. For example, when int8 quantizing 40GB of floating point vectors, an extra 10GB of data will be stored for the quantized vectors. The total disk usage amounts to 50GB, but the memory usage for fast search will be reduced to 10GB. For `float` vectors with `dim` greater than or equal to `384`, using a [`quantized`](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-quantization) index is highly recommended. @@ -24,7 +24,7 @@ The speed of kNN search scales linearly with the number of vector dimensions, be ## Exclude vector fields from `_source` [_exclude_vector_fields_from_source] -{{es}} stores the original JSON document that was passed at index time in the [`_source` field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html). By default, each hit in the search results contains the full document `_source`. When the documents contain high-dimensional `dense_vector` fields, the `_source` can be quite large and expensive to load. This could significantly slow down the speed of kNN search. +{{es}} stores the original JSON document that was passed at index time in the [`_source` field](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md). By default, each hit in the search results contains the full document `_source`. When the documents contain high-dimensional `dense_vector` fields, the `_source` can be quite large and expensive to load. This could significantly slow down the speed of kNN search. ::::{note} [reindex](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex), [update](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update), and [update by query](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) operations generally require the `_source` field. Disabling `_source` for a field might result in unexpected behavior for these operations. For example, reindex might not actually contain the `dense_vector` field in the new index. @@ -58,7 +58,7 @@ The data nodes should also leave a buffer for other ways that RAM is needed. For ## Warm up the filesystem cache [dense-vector-preloading] -If the machine running Elasticsearch is restarted, the filesystem cache will be empty, so it will take some time before the operating system loads hot regions of the index into memory so that search operations are fast.
You can explicitly tell the operating system which files should be loaded into memory eagerly depending on the file extension using the [`index.store.preload`](https://www.elastic.co/guide/en/elasticsearch/reference/current/preload-data-to-file-system-cache.html) setting. +If the machine running Elasticsearch is restarted, the filesystem cache will be empty, so it will take some time before the operating system loads hot regions of the index into memory so that search operations are fast. You can explicitly tell the operating system which files should be loaded into memory eagerly depending on the file extension using the [`index.store.preload`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/preloading-data-into-file-system-cache.md) setting. ::::{warning} Loading data into the filesystem cache eagerly on too many indices or too many files will make search *slower* if the filesystem cache is not large enough to hold all the data. Use with caution. :::: @@ -78,7 +78,7 @@ Generally, if you are using a quantized index, you should only preload the relev ## Reduce the number of index segments [_reduce_the_number_of_index_segments] -{{es}} shards are composed of segments, which are internal storage elements in the index. For approximate kNN search, {{es}} stores the vector values of each segment as a separate HNSW graph, so kNN search must check each segment. The recent parallelization of kNN search made it much faster to search across multiple segments, but still kNN search can be up to several times faster if there are fewer segments. By default, {{es}} periodically merges smaller segments into larger ones through a background [merge process](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html). If this isn’t sufficient, you can take explicit steps to reduce the number of index segments. +{{es}} shards are composed of segments, which are internal storage elements in the index. For approximate kNN search, {{es}} stores the vector values of each segment as a separate HNSW graph, so kNN search must check each segment. The recent parallelization of kNN search made it much faster to search across multiple segments, but kNN search can still be up to several times faster if there are fewer segments. By default, {{es}} periodically merges smaller segments into larger ones through a background [merge process](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/merge-scheduler-settings.md). If this isn’t sufficient, you can take explicit steps to reduce the number of index segments. ### Increase maximum segment size [_increase_maximum_segment_size] @@ -90,8 +90,8 @@ Generally, if you are using a quantized index, you should only preload the relev A common pattern is to first perform an initial bulk upload, then make an index available for searches. Instead of force merging, you can adjust the index settings to encourage {{es}} to create larger initial segments: -* Ensure there are no searches during the bulk upload and disable [`index.refresh_interval`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-refresh-interval-setting) by setting it to `-1`. This prevents refresh operations and avoids creating extra segments. -* Give {{es}} a large indexing buffer so it can accept more documents before flushing. By default, the [`indices.memory.index_buffer_size`](https://www.elastic.co/guide/en/elasticsearch/reference/current/indexing-buffer.html) is set to 10% of the heap size. With a substantial heap size like 32GB, this is often enough. To allow the full indexing buffer to be used, you should also increase the limit [`index.translog.flush_threshold_size`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-translog.html).
+* Ensure there are no searches during the bulk upload and disable [`index.refresh_interval`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-refresh-interval-setting) by setting it to `-1`. This prevents refresh operations and avoids creating extra segments. +* Give {{es}} a large indexing buffer so it can accept more documents before flushing. By default, the [`indices.memory.index_buffer_size`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/indexing-buffer-settings.md) is set to 10% of the heap size. With a substantial heap size like 32GB, this is often enough. To allow the full indexing buffer to be used, you should also increase the limit [`index.translog.flush_threshold_size`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/translog-settings.md). ## Avoid heavy indexing during searches [_avoid_heavy_indexing_during_searches] @@ -103,7 +103,7 @@ When possible, it’s best to avoid heavy indexing during approximate kNN search ## Avoid page cache thrashing by using modest readahead values on Linux [_avoid_page_cache_thrashing_by_using_modest_readahead_values_on_linux_2] -Search can cause a lot of randomized read I/O. When the underlying block device has a high readahead value, there may be a lot of unnecessary read I/O done, especially when files are accessed using memory mapping (see [storage types](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html#file-system)). +Search can cause a lot of randomized read I/O. When the underlying block device has a high readahead value, there may be a lot of unnecessary read I/O done, especially when files are accessed using memory mapping (see [storage types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-store-settings.md#file-system)). Most Linux distributions use a sensible readahead value of `128KiB` for a single plain device, however, when using software raid, LVM or dm-crypt the resulting block device (backing Elasticsearch [path.data](../../deploy/self-managed/important-settings-configuration.md#path-settings)) may end up having a very large readahead value (in the range of several MiB). This usually results in severe page (filesystem) cache thrashing adversely affecting search (or [update](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-document)) performance. diff --git a/deploy-manage/production-guidance/optimize-performance/disk-usage.md b/deploy-manage/production-guidance/optimize-performance/disk-usage.md index e27cfd7080..0f233e74af 100644 --- a/deploy-manage/production-guidance/optimize-performance/disk-usage.md +++ b/deploy-manage/production-guidance/optimize-performance/disk-usage.md @@ -24,12 +24,12 @@ PUT index } ``` -[`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) fields store normalization factors in the index to facilitate document scoring. If you only need matching capabilities on a `text` field but do not care about the produced scores, you can use the [`match_only_text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html#match-only-text-field-type) type instead. This field type saves significant space by dropping scoring and positional information. +[`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) fields store normalization factors in the index to facilitate document scoring. 
If you only need matching capabilities on a `text` field but do not care about the produced scores, you can use the [`match_only_text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md#match-only-text-field-type) type instead. This field type saves significant space by dropping scoring and positional information. ## Don’t use default dynamic string mappings [default-dynamic-string-mapping] -The default [dynamic string mappings](../../../manage-data/data-store/mapping/dynamic-mapping.md) will index string fields both as [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) and [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html). This is wasteful if you only need one of them. Typically an `id` field will only need to be indexed as a `keyword` while a `body` field will only need to be indexed as a `text` field. +The default [dynamic string mappings](../../../manage-data/data-store/mapping/dynamic-mapping.md) will index string fields both as [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) and [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md). This is wasteful if you only need one of them. Typically an `id` field will only need to be indexed as a `keyword` while a `body` field will only need to be indexed as a `text` field. This can be disabled by either configuring explicit mappings on string fields or setting up dynamic templates that will map string fields as either `text` or `keyword`. @@ -63,12 +63,12 @@ Keep in mind that large shard sizes come with drawbacks, such as long full recov ## Disable `_source` [disable-source] -The [`_source`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html) field stores the original JSON body of the document. If you don’t need access to it you can disable it. However, APIs that needs access to `_source` such as update, highlight and reindex won’t work. +The [`_source`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md) field stores the original JSON body of the document. If you don’t need access to it, you can disable it. However, APIs that need access to `_source`, such as update, highlight, and reindex, won’t work. ## Use `best_compression` [best-compression] -The `_source` and stored fields can easily take a non negligible amount of disk space. They can be compressed more aggressively by using the `best_compression` [codec](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-codec). +The `_source` and stored fields can easily take a non-negligible amount of disk space. They can be compressed more aggressively by using the `best_compression` [codec](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-codec). ## Force merge [_force_merge] @@ -90,14 +90,14 @@ The [shrink API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/ope ## Use the smallest numeric type that is sufficient [_use_the_smallest_numeric_type_that_is_sufficient] -The type that you pick for [numeric data](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) can have a significant impact on disk usage.
In particular, integers should be stored using an integer type (`byte`, `short`, `integer` or `long`) and floating points should either be stored in a `scaled_float` if appropriate or in the smallest type that fits the use-case: using `float` over `double`, or `half_float` over `float` will help save storage. +The type that you pick for [numeric data](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) can have a significant impact on disk usage. In particular, integers should be stored using an integer type (`byte`, `short`, `integer` or `long`) and floating points should either be stored in a `scaled_float` if appropriate or in the smallest type that fits the use-case: using `float` over `double`, or `half_float` over `float` will help save storage. ## Use index sorting to colocate similar documents [_use_index_sorting_to_colocate_similar_documents] When Elasticsearch stores `_source`, it compresses multiple documents at once in order to improve the overall compression ratio. For instance it is very common that documents share the same field names, and quite common that they share some field values, especially on fields that have a low cardinality or a [zipfian](https://en.wikipedia.org/wiki/Zipf%27s_law) distribution. -By default documents are compressed together in the order that they are added to the index. If you enabled [index sorting](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-index-sorting.html) then instead they are compressed in sorted order. Sorting documents with similar structure, fields, and values together should improve the compression ratio. +By default, documents are compressed together in the order that they are added to the index. If you enable [index sorting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-sorting-settings.md), they are instead compressed in sorted order. Sorting documents with similar structure, fields, and values together should improve the compression ratio. ## Put fields in the same order in documents [_put_fields_in_the_same_order_in_documents] diff --git a/deploy-manage/production-guidance/optimize-performance/indexing-speed.md b/deploy-manage/production-guidance/optimize-performance/indexing-speed.md index 0f91149843..c1c42d653e 100644 --- a/deploy-manage/production-guidance/optimize-performance/indexing-speed.md +++ b/deploy-manage/production-guidance/optimize-performance/indexing-speed.md @@ -28,7 +28,7 @@ By default, Elasticsearch periodically refreshes indices every second, but only This is the optimal configuration if you have no or very little search traffic (e.g. less than one search request every 5 minutes) and want to optimize for indexing speed. This behavior aims to automatically optimize bulk indexing in the default case when no searches are performed. In order to opt out of this behavior, set the refresh interval explicitly. -On the other hand, if your index experiences regular search requests, this default behavior means that Elasticsearch will refresh your index every 1 second. If you can afford to increase the amount of time between when a document gets indexed and when it becomes visible, increasing the [`index.refresh_interval`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-refresh-interval-setting) to a larger value, e.g. `30s`, might help improve indexing speed.
+On the other hand, if your index experiences regular search requests, this default behavior means that Elasticsearch will refresh your index every 1 second. If you can afford to increase the amount of time between when a document gets indexed and when it becomes visible, increasing the [`index.refresh_interval`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-refresh-interval-setting) to a larger value, e.g. `30s`, might help improve indexing speed. ## Disable replicas for initial loads [_disable_replicas_for_initial_loads] @@ -69,7 +69,7 @@ Some remote storage performs very poorly, especially under the kind of load that ## Indexing buffer size [_indexing_buffer_size] -If your node is doing only heavy indexing, be sure [`indices.memory.index_buffer_size`](https://www.elastic.co/guide/en/elasticsearch/reference/current/indexing-buffer.html) is large enough to give at most 512 MB indexing buffer per shard doing heavy indexing (beyond that indexing performance does not typically improve). Elasticsearch takes that setting (a percentage of the Java heap or an absolute byte-size), and uses it as a shared buffer across all active shards. Very active shards will naturally use this buffer more than shards that are performing lightweight indexing. +If your node is doing only heavy indexing, be sure [`indices.memory.index_buffer_size`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/indexing-buffer-settings.md) is large enough to give at most 512 MB indexing buffer per shard doing heavy indexing (beyond that indexing performance does not typically improve). Elasticsearch takes that setting (a percentage of the Java heap or an absolute byte-size), and uses it as a shared buffer across all active shards. Very active shards will naturally use this buffer more than shards that are performing lightweight indexing. The default is `10%`, which is often plenty: for example, if you give the JVM 10GB of memory, it will give 1GB to the index buffer, which is enough to host two shards that are heavily indexing. diff --git a/deploy-manage/production-guidance/optimize-performance/search-speed.md b/deploy-manage/production-guidance/optimize-performance/search-speed.md index beeb332ef5..ba32378c83 100644 --- a/deploy-manage/production-guidance/optimize-performance/search-speed.md +++ b/deploy-manage/production-guidance/optimize-performance/search-speed.md @@ -13,7 +13,7 @@ Elasticsearch heavily relies on the filesystem cache in order to make search fas ## Avoid page cache thrashing by using modest readahead values on Linux [_avoid_page_cache_thrashing_by_using_modest_readahead_values_on_linux] -Search can cause a lot of randomized read I/O. When the underlying block device has a high readahead value, there may be a lot of unnecessary read I/O done, especially when files are accessed using memory mapping (see [storage types](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html#file-system)). +Search can cause a lot of randomized read I/O. When the underlying block device has a high readahead value, there may be a lot of unnecessary read I/O done, especially when files are accessed using memory mapping (see [storage types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-store-settings.md#file-system)).
Most Linux distributions use a sensible readahead value of `128KiB` for a single plain device. However, when using software RAID, LVM, or dm-crypt, the resulting block device (backing Elasticsearch [path.data](../../deploy/self-managed/important-settings-configuration.md#path-settings)) may end up having a very large readahead value (in the range of several MiB). This usually results in severe page (filesystem) cache thrashing, adversely affecting search (or [update](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-document)) performance. @@ -43,12 +43,12 @@ Some remote storage performs very poorly, especially under the kind of load that Documents should be modeled so that search-time operations are as cheap as possible. -In particular, joins should be avoided. [`nested`](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) can make queries several times slower and [parent-child](https://www.elastic.co/guide/en/elasticsearch/reference/current/parent-join.html) relations can make queries hundreds of times slower. So if the same questions can be answered without joins by denormalizing documents, significant speedups can be expected. +In particular, joins should be avoided. [`nested`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/nested.md) can make queries several times slower and [parent-child](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/parent-join.md) relations can make queries hundreds of times slower. So if the same questions can be answered without joins by denormalizing documents, significant speedups can be expected. ## Search as few fields as possible [search-as-few-fields-as-possible] -The more fields a [`query_string`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html) or [`multi_match`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html) query targets, the slower it is. A common technique to improve search speed over multiple fields is to copy their values into a single field at index time, and then use this field at search time. This can be automated with the [`copy-to`](https://www.elastic.co/guide/en/elasticsearch/reference/current/copy-to.html) directive of mappings without having to change the source of documents. Here is an example of an index containing movies that optimizes queries that search over both the name and the plot of the movie by indexing both values into the `name_and_plot` field. +The more fields a [`query_string`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-query-string-query.md) or [`multi_match`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-multi-match-query.md) query targets, the slower it is. A common technique to improve search speed over multiple fields is to copy their values into a single field at index time, and then use this field at search time. This can be automated with the [`copy-to`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/copy-to.md) directive of mappings without having to change the source of documents. Here is an example of an index containing movies that optimizes queries that search over both the name and the plot of the movie by indexing both values into the `name_and_plot` field.
```console PUT movies @@ -74,7 +74,7 @@ PUT movies ## Pre-index data [_pre_index_data] -You should leverage patterns in your queries to optimize the way data is indexed. For instance, if all your documents have a `price` field and most queries run [`range`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html) aggregations on a fixed list of ranges, you could make this aggregation faster by pre-indexing the ranges into the index and using a [`terms`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) aggregation. +You should leverage patterns in your queries to optimize the way data is indexed. For instance, if all your documents have a `price` field and most queries run [`range`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-range-aggregation.md) aggregations on a fixed list of ranges, you could make this aggregation faster by pre-indexing the ranges into the index and using a [`terms`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) aggregation. For instance, if documents look like: @@ -106,7 +106,7 @@ GET index/_search } ``` -Then documents could be enriched by a `price_range` field at index time, which should be mapped as a [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html): +Then documents could be enriched by a `price_range` field at index time, which should be mapped as a [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md): ```console PUT index @@ -146,7 +146,7 @@ GET index/_search ## Consider mapping identifiers as `keyword` [map-ids-as-keyword] -Not all numeric data should be mapped as a [numeric](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) field data type. {{es}} optimizes numeric fields, such as `integer` or `long`, for [`range`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html) queries. However, [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) fields are better for [`term`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html) and other [term-level](https://www.elastic.co/guide/en/elasticsearch/reference/current/term-level-queries.html) queries. +Not all numeric data should be mapped as a [numeric](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) field data type. {{es}} optimizes numeric fields, such as `integer` or `long`, for [`range`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-range-query.md) queries. However, [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) fields are better for [`term`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-term-query.md) and other [term-level](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/term-level-queries.md) queries. Identifiers, such as an ISBN or a product ID, are rarely used in `range` queries. However, they are often retrieved using term-level queries.
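As a sketch of this pattern (index, field, and value are hypothetical), an identifier can be mapped as `keyword` and then fetched with a `term` query:

```console
PUT products
{
  "mappings": {
    "properties": {
      "product_id": { "type": "keyword" }
    }
  }
}

GET products/_search
{
  "query": {
    "term": { "product_id": "9780060812492" }
  }
}
```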
@@ -155,12 +155,12 @@ Consider mapping a numeric identifier as a `keyword` if: * You don’t plan to search for the identifier data using [`range`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html) queries. * Fast retrieval is important. `term` query searches on `keyword` fields are often faster than `term` searches on numeric fields. -If you’re unsure which to use, you can use a [multi-field](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html) to map the data as both a `keyword` *and* a numeric data type. +If you’re unsure which to use, you can use a [multi-field](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md) to map the data as both a `keyword` *and* a numeric data type. ## Avoid scripts [_avoid_scripts] -If possible, avoid using [script](../../../explore-analyze/scripting.md)-based sorting, scripts in aggregations, and the [`script_score`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-score-query.html) query. See [Scripts, caching, and search speed](../../../explore-analyze/scripting/scripts-search-speed.md). +If possible, avoid using [script](../../../explore-analyze/scripting.md)-based sorting, scripts in aggregations, and the [`script_score`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-script-score-query.md) query. See [Scripts, caching, and search speed](../../../explore-analyze/scripting/scripts-search-speed.md). ## Search rounded dates [_search_rounded_dates] @@ -274,7 +274,7 @@ Do not force-merge indices to which you are still writing, or to which you will ## Warm up global ordinals [_warm_up_global_ordinals] -[Global ordinals](https://www.elastic.co/guide/en/elasticsearch/reference/current/eager-global-ordinals.html) are a data structure that is used to optimize the performance of aggregations. They are calculated lazily and stored in the JVM heap as part of the [field data cache](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-fielddata.html). For fields that are heavily used for bucketing aggregations, you can tell {{es}} to construct and cache the global ordinals before requests are received. This should be done carefully because it will increase heap usage and can make [refreshes](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh) take longer. The option can be updated dynamically on an existing mapping by setting the [eager global ordinals](https://www.elastic.co/guide/en/elasticsearch/reference/current/eager-global-ordinals.html) mapping parameter: +[Global ordinals](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/eager-global-ordinals.md) are a data structure that is used to optimize the performance of aggregations. They are calculated lazily and stored in the JVM heap as part of the [field data cache](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/field-data-cache-settings.md). For fields that are heavily used for bucketing aggregations, you can tell {{es}} to construct and cache the global ordinals before requests are received. This should be done carefully because it will increase heap usage and can make [refreshes](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh) take longer. 
The option can be updated dynamically on an existing mapping by setting the [eager global ordinals](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/eager-global-ordinals.md) mapping parameter: ```console PUT index @@ -293,7 +293,7 @@ PUT index ## Warm up the filesystem cache [_warm_up_the_filesystem_cache] -If the machine running Elasticsearch is restarted, the filesystem cache will be empty, so it will take some time before the operating system loads hot regions of the index into memory so that search operations are fast. You can explicitly tell the operating system which files should be loaded into memory eagerly depending on the file extension using the [`index.store.preload`](https://www.elastic.co/guide/en/elasticsearch/reference/current/preload-data-to-file-system-cache.html) setting. +If the machine running Elasticsearch is restarted, the filesystem cache will be empty, so it will take some time before the operating system loads hot regions of the index into memory so that search operations are fast. You can explicitly tell the operating system which files should be loaded into memory eagerly depending on the file extension using the [`index.store.preload`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/preloading-data-into-file-system-cache.md) setting. ::::{warning} Loading data into the filesystem cache eagerly on too many indices or too many files will make search *slower* if the filesystem cache is not large enough to hold all the data. Use with caution. @@ -303,12 +303,12 @@ Loading data into the filesystem cache eagerly on too many indices or too many f ## Use index sorting to speed up conjunctions [_use_index_sorting_to_speed_up_conjunctions] -[Index sorting](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-index-sorting.html) can be useful in order to make conjunctions faster at the cost of slightly slower indexing. Read more about it in the [index sorting documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-index-sorting-conjunctions.html). +[Index sorting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-sorting-settings.md) can be useful in order to make conjunctions faster at the cost of slightly slower indexing. Read more about it in the [index sorting documentation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-modules-index-sorting-conjunctions.md). ## Use `preference` to optimize cache utilization [preference-cache-optimization] -There are multiple caches that can help with search performance, such as the [filesystem cache](https://en.wikipedia.org/wiki/Page_cache), the [request cache](https://www.elastic.co/guide/en/elasticsearch/reference/current/shard-request-cache.html) or the [query cache](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-cache.html). Yet all these caches are maintained at the node level, meaning that if you run the same request twice in a row, have 1 replica or more and use [round-robin](https://en.wikipedia.org/wiki/Round-robin_DNS), the default routing algorithm, then those two requests will go to different shard copies, preventing node-level caches from helping.
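To make the `preference` idea concrete, here is a hedged sketch (index name and session identifier are placeholders) of routing a user’s repeated searches to the same shard copies:

```console
GET my-index-000001/_search?preference=user-session-1234
{
  "query": {
    "match": { "message": "error" }
  }
}
```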
+There are multiple caches that can help with search performance, such as the [filesystem cache](https://en.wikipedia.org/wiki/Page_cache), the [request cache](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/shard-request-cache-settings.md) or the [query cache](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-query-cache-settings.md). Yet all these caches are maintained at the node level, meaning that if you run the same request twice in a row, have 1 replica or more and use [round-robin](https://en.wikipedia.org/wiki/Round-robin_DNS), the default routing algorithm, then those two requests will go to different shard copies, preventing node-level caches from helping. Since it is common for users of a search application to run similar requests one after another, for instance in order to analyze a narrower subset of the index, using a preference value that identifies the current user or session could help optimize usage of the caches. @@ -333,12 +333,12 @@ Because the Profile API itself adds significant overhead to the query, this info ## Faster phrase queries with `index_phrases` [faster-phrase-queries] -The [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) field has an [`index_phrases`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-phrases.html) option that indexes 2-shingles and is automatically leveraged by query parsers to run phrase queries that don’t have a slop. If your use-case involves running lots of phrase queries, this can speed up queries significantly. +The [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) field has an [`index_phrases`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/index-phrases.md) option that indexes 2-shingles and is automatically leveraged by query parsers to run phrase queries that don’t have a slop. If your use-case involves running lots of phrase queries, this can speed up queries significantly. ## Faster prefix queries with `index_prefixes` [faster-prefix-queries] -The [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) field has an [`index_prefixes`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-prefixes.html) option that indexes prefixes of all terms and is automatically leveraged by query parsers to run prefix queries. If your use-case involves running lots of prefix queries, this can speed up queries significantly. +The [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) field has an [`index_prefixes`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/index-prefixes.md) option that indexes prefixes of all terms and is automatically leveraged by query parsers to run prefix queries. If your use-case involves running lots of prefix queries, this can speed up queries significantly. ## Use `constant_keyword` to speed up filtering [faster-filtering-with-constant-keyword] diff --git a/deploy-manage/production-guidance/optimize-performance/size-shards.md b/deploy-manage/production-guidance/optimize-performance/size-shards.md index b7bebf2a72..44f8306554 100644 --- a/deploy-manage/production-guidance/optimize-performance/size-shards.md +++ b/deploy-manage/production-guidance/optimize-performance/size-shards.md @@ -28,14 +28,14 @@ Keep the following things in mind when building your sharding strategy.
### Searches run on a single thread per shard [single-thread-per-shard] -Most searches hit multiple shards. Each shard runs the search on a single CPU thread. While a shard can run multiple concurrent searches, searches across a large number of shards can deplete a node’s [search thread pool](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-threadpool.html). This can result in low throughput and slow search speeds. +Most searches hit multiple shards. Each shard runs the search on a single CPU thread. While a shard can run multiple concurrent searches, searches across a large number of shards can deplete a node’s [search thread pool](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/thread-pool-settings.md). This can result in low throughput and slow search speeds. ### Each index, shard, segment and field has overhead [each-shard-has-overhead] Every index and every shard requires some memory and CPU resources. In most cases, a small set of large shards uses fewer resources than many small shards. -Segments play a big role in a shard’s resource usage. Most shards contain several segments, which store the shard’s index data. {{es}} keeps some segment metadata in heap memory so it can be quickly retrieved for searches. As a shard grows, its segments are [merged](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html) into fewer, larger segments. This decreases the number of segments, which means less metadata is kept in heap memory. +Segments play a big role in a shard’s resource usage. Most shards contain several segments, which store the shard’s index data. {{es}} keeps some segment metadata in heap memory so it can be quickly retrieved for searches. As a shard grows, its segments are [merged](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/merge-scheduler-settings.md) into fewer, larger segments. This decreases the number of segments, which means less metadata is kept in heap memory. Every mapped field also carries some overhead in terms of memory usage and disk space. By default {{es}} will automatically create a mapping for every field in every document it indexes, but you can switch off this behaviour to [take control of your mappings](../../../manage-data/data-store/mapping/explicit-mapping.md). @@ -67,8 +67,8 @@ One advantage of this setup is [automatic rollover](../../../manage-data/lifecyc {{ilm-init}} also makes it easy to change your sharding strategy over time: -* **Want to decrease the shard count for new indices?**
Change the [`index.number_of_shards`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-number-of-shards) setting in the data stream’s [matching index template](../../../manage-data/data-store/data-streams/modify-data-stream.md#data-streams-change-mappings-and-settings). -* **Want larger shards or fewer backing indices?**
Increase your {{ilm-init}} policy’s [rollover threshold](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-rollover.html). +* **Want to decrease the shard count for new indices?**
Change the [`index.number_of_shards`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-number-of-shards) setting in the data stream’s [matching index template](../../../manage-data/data-store/data-streams/modify-data-stream.md#data-streams-change-mappings-and-settings). +* **Want larger shards or fewer backing indices?**
Increase your {{ilm-init}} policy’s [rollover threshold](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-rollover.md). * **Need indices that span shorter intervals?**
Offset the increased shard count by deleting older indices sooner. You can do this by lowering the `min_age` threshold for your policy’s [delete phase](../../../manage-data/lifecycle/index-lifecycle-management/index-lifecycle.md). Every new backing index is an opportunity to further tune your strategy. @@ -133,7 +133,7 @@ GET _cat/shards?v=true ### Add enough nodes to stay within the cluster shard limits [shard-count-per-node-recommendation] -[Cluster shard limits](https://www.elastic.co/guide/en/elasticsearch/reference/current/misc-cluster-settings.html#cluster-shard-limit) prevent creation of more than 1000 non-frozen shards per node, and 3000 frozen shards per dedicated frozen node. Make sure you have enough nodes of each type in your cluster to handle the number of shards you need. +[Cluster shard limits](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-shard-limit) prevent creation of more than 1000 non-frozen shards per node, and 3000 frozen shards per dedicated frozen node. Make sure you have enough nodes of each type in your cluster to handle the number of shards you need. ### Allow enough heap for field mappers and overheads [field-count-recommendation] @@ -223,7 +223,7 @@ Note that the above rules do not necessarily guarantee the performance of search If too many shards are allocated to a specific node, the node can become a hotspot. For example, if a single node contains too many shards for an index with a high indexing volume, the node is likely to have issues. -To prevent hotspots, use the [`index.routing.allocation.total_shards_per_node`](https://www.elastic.co/guide/en/elasticsearch/reference/current/allocation-total-shards.html#total-shards-per-node) index setting to explicitly limit the number of shards on a single node. You can configure `index.routing.allocation.total_shards_per_node` using the [update index settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings). +To prevent hotspots, use the [`index.routing.allocation.total_shards_per_node`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/total-shards-per-node.md#total-shards-per-node) index setting to explicitly limit the number of shards on a single node. You can configure `index.routing.allocation.total_shards_per_node` using the [update index settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings). ```console PUT my-index-000001/_settings @@ -237,7 +237,7 @@ PUT my-index-000001/_settings ### Avoid unnecessary mapped fields [avoid-unnecessary-fields] -By default {{es}} [automatically creates a mapping](../../../manage-data/data-store/mapping/dynamic-mapping.md) for every field in every document it indexes. Every mapped field corresponds to some data structures on disk which are needed for efficient search, retrieval, and aggregations on this field. Details about each mapped field are also held in memory. In many cases this overhead is unnecessary because a field is not used in any searches or aggregations. Use [*Explicit mapping*](../../../manage-data/data-store/mapping/explicit-mapping.md) instead of dynamic mapping to avoid creating fields that are never used. If a collection of fields is typically used together, consider using [`copy_to`](https://www.elastic.co/guide/en/elasticsearch/reference/current/copy-to.html) to consolidate them at index time.
If a field is only rarely used, it may be better to make it a [Runtime field](../../../manage-data/data-store/mapping/runtime-fields.md) instead. +By default {{es}} [automatically creates a mapping](../../../manage-data/data-store/mapping/dynamic-mapping.md) for every field in every document it indexes. Every mapped field corresponds to some data structures on disk which are needed for efficient search, retrieval, and aggregations on this field. Details about each mapped field are also held in memory. In many cases this overhead is unnecessary because a field is not used in any searches or aggregations. Use [*Explicit mapping*](../../../manage-data/data-store/mapping/explicit-mapping.md) instead of dynamic mapping to avoid creating fields that are never used. If a collection of fields is typically used together, consider using [`copy_to`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/copy-to.md) to consolidate them at index time. If a field is only rarely used, it may be better to make it a [Runtime field](../../../manage-data/data-store/mapping/runtime-fields.md) instead. You can get information about which fields are being used with the [Field usage stats](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats) API, and you can analyze the disk usage of mapped fields using the [Analyze index disk usage](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage) API. Note, however, that unnecessary mapped fields also carry some memory overhead as well as their disk usage. @@ -284,7 +284,7 @@ POST my-index-000001/_forcemerge If you no longer write to an index, you can use the [shrink index API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink) to reduce its shard count. -{{ilm-init}} also has a [shrink action](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-shrink.html) for indices in the warm phase. +{{ilm-init}} also has a [shrink action](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-shrink.md) for indices in the warm phase. ### Combine smaller indices [combine-smaller-indices] diff --git a/deploy-manage/reference-architectures/hotfrozen-high-availability.md b/deploy-manage/reference-architectures/hotfrozen-high-availability.md index 98c9cc2d5d..1af8d83e97 100644 --- a/deploy-manage/reference-architectures/hotfrozen-high-availability.md +++ b/deploy-manage/reference-architectures/hotfrozen-high-availability.md @@ -10,7 +10,7 @@ applies: # Hot/Frozen - High Availability [hot-frozen-architecture] -The Hot/Frozen High Availability architecture is cost-optimized for large time-series datasets. In this architecture, the hot tier is primarily used for indexing, searching, and continuity for automated processes. [Searchable snapshots](https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots.html) are taken from hot into a repository, such as a cloud object store or an on-premises shared filesystem, and then cached to any desired volume on the local disks of the frozen tier. Data in the repository is indexed for fast retrieval and accessed on-demand from the frozen nodes. Index and snapshot lifecycle management are used to automate this process. +The Hot/Frozen High Availability architecture is cost-optimized for large time-series datasets. In this architecture, the hot tier is primarily used for indexing, searching, and continuity for automated processes.
[Searchable snapshots](/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md) are taken from hot into a repository, such as a cloud object store or an on-premises shared filesystem, and then cached to any desired volume on the local disks of the frozen tier. Data in the repository is indexed for fast retrieval and accessed on-demand from the frozen nodes. Index and snapshot lifecycle management are used to automate this process. This architecture is ideal for time-series use cases, such as Observability or Security, that do not require updating. All the necessary components of the {{stack}} are included. This is not intended for sizing workloads, but rather as a basis to ensure that your cluster is ready to handle any desired workload with resiliency. A very high-level representation of data flow is included, and for more detail around ingest architecture see our [ingest architectures](../../manage-data/ingest/ingest-reference-architectures/use-case-arch.md) documentation. @@ -22,7 +22,7 @@ This Hot/Frozen – High Availability architecture is intended for organizations * Have a requirement for cost-effective long-term data storage (many months or years). * Provide insights and alerts using logs, metrics, traces, or various event types to ensure optimal performance and quick issue resolution for applications. * Apply [machine learning anomaly detection](https://www.elastic.co/guide/en/kibana/current/xpack-ml-anomalies.html) to help detect patterns in time series data to find root cause and resolve problems faster. -* Use an AI assistant ([Observability](https://www.elastic.co/guide/en/observability/current/obs-ai-assistant.html), [Security](https://www.elastic.co/guide/en/security/current/security-assistant.html), or [Playground](https://www.elastic.co/guide/en/kibana/current/playground.html)) for investigation, incident response, reporting, query generation, or query conversion from other languages using natural language. +* Use an AI assistant ([Observability](https://www.elastic.co/guide/en/observability/current/obs-ai-assistant.html), [Security](/solutions/security/ai/ai-assistant.md), or [Playground](/solutions/search/rag/playground.md)) for investigation, incident response, reporting, query generation, or query conversion from other languages using natural language. * Deploy an architecture model that allows for maximum flexibility between storage cost and performance. ::::{important} @@ -46,7 +46,7 @@ We use an Availability Zone (AZ) concept in the architecture above. When running :::: -The diagram illustrates an {{es}} cluster deployed across 3 availability zones (AZ). For production we recommend a minimum of 2 availability zones and 3 availability zones for mission-critical applications. See [Plan for production](https://www.elastic.co/guide/en/cloud/current/ec-planning.html) for more details. A cluster running in {{ecloud}} with data nodes in only two AZs will create a third master-eligible node in a third AZ. High availability cannot be achieved without three zones for any distributed computing technology. +The diagram illustrates an {{es}} cluster deployed across 3 availability zones (AZ). For production we recommend a minimum of 2 availability zones and 3 availability zones for mission-critical applications. See [Plan for production](/deploy-manage/production-guidance/plan-for-production-elastic-cloud.md) for more details. A cluster running in {{ecloud}} with data nodes in only two AZs will create a third master-eligible node in a third AZ.
High availability cannot be achieved without three zones for any distributed computing technology. The number of data nodes shown for each tier (hot and frozen) is illustrative and would be scaled up depending on ingest volume and retention period. Hot nodes contain both primary and replica shards. By default, primary and replica shards are always guaranteed to be in different availability zones in {{ess}}, but when self-deploying, [shard allocation awareness](../distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md) would need to be configured. Frozen nodes act as a large high-speed cache and retrieve data from the snapshot store as needed. @@ -55,7 +55,7 @@ Machine learning nodes are optional but highly recommended for large scale time ## Recommended hardware specifications [hot-frozen-hardware] -With {{ech}}, you can deploy clusters in AWS, Azure, and Google Cloud. Available hardware types and configurations vary across all three cloud providers but each provides instance types that meet our recommendations for the node types used in this architecture. For more details on these instance types, see our documentation on {{ech}} hardware for [AWS](https://www.elastic.co/guide/en/cloud/current/ec-default-aws-configurations.html), [Azure](https://www.elastic.co/guide/en/cloud/current/ec-default-azure-configurations.html), and [GCP](https://www.elastic.co/guide/en/cloud/current/ec-default-gcp-configurations.html). The **Physical** column below is guidance, based on the cloud node types, when self-deploying {{es}} in your own data center. +With {{ech}}, you can deploy clusters in AWS, Azure, and Google Cloud. Available hardware types and configurations vary across all three cloud providers but each provides instance types that meet our recommendations for the node types used in this architecture. For more details on these instance types, see our documentation on {{ech}} hardware for [AWS](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/aws-default.md), [Azure](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/azure-default.md), and [GCP](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/gcp-default-provider.md). The **Physical** column below is guidance, based on the cloud node types, when self-deploying {{es}} in your own data center. In the links provided above, Elastic has performance-tested hardware for each of the cloud providers to find the optimal hardware for each node type. We use ratios to represent the best mix of CPU, RAM, and disk for each type. In some cases the CPU to RAM ratio is key; in others, the disk to memory ratio and type of disk is critical. Significantly deviating from these ratios may seem like a way to save on hardware costs, but may result in an {{es}} cluster that does not scale and perform well. @@ -83,16 +83,16 @@ This table shows our specific recommendations for nodes in a Hot/Frozen architec **Shard management:** -* The most important foundational step to maintaining performance as you scale is proper shard management. This includes even shard distribution amongst nodes, shard size, and shard count. For a complete understanding of what shards are and how they should be used, refer to [Size your shards](https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html). +* The most important foundational step to maintaining performance as you scale is proper shard management. This includes even shard distribution amongst nodes, shard size, and shard count.
For a complete understanding of what shards are and how they should be used, refer to [Size your shards](/deploy-manage/production-guidance/optimize-performance/size-shards.md). **Snapshots:** -* If auditable or business-critical events are being logged, a backup is necessary. The choice to back up data will depend on each individual business’s needs and requirements. Refer to our [snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html) documentation to learn more. -* To automate snapshots and attach them to index lifecycle management policies, refer to [SLM (Snapshot lifecycle management)](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-take-snapshot.html#automate-snapshots-slm). +* If auditable or business-critical events are being logged, a backup is necessary. The choice to back up data will depend on each individual business’s needs and requirements. Refer to our [snapshot repository](/deploy-manage/tools/snapshot-and-restore/self-managed.md) documentation to learn more. +* To automate snapshots and attach them to index lifecycle management policies, refer to [SLM (Snapshot lifecycle management)](/deploy-manage/tools/snapshot-and-restore/create-snapshots.md#automate-snapshots-slm). **Kibana:** -* If self-deploying outside of {{ess}}, ensure that {{kib}} is configured for [high availability](https://www.elastic.co/guide/en/kibana/current/production.html#high-availability). +* If self-deploying outside of {{ess}}, ensure that {{kib}} is configured for [high availability](/deploy-manage/production-guidance/kibana-in-production-environments.md#high-availability). ## How many nodes of each do you need? [hot-frozen-estimate] @@ -110,5 +110,5 @@ You can [contact us](https://www.elastic.co/contact) for an estimate and recomme ## Resources and references [hot-frozen-resources] * [{{es}} - Get ready for production](https://www.elastic.co/guide/en/elasticsearch/reference/current/scalability.html) -* [{{ess}} - Preparing a deployment for production](https://www.elastic.co/guide/en/cloud/current/ec-prepare-production.html) +* [{{ess}} - Preparing a deployment for production](/deploy-manage/deploy/elastic-cloud/cloud-hosted.md) * [Size your shards](https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html) diff --git a/deploy-manage/remote-clusters/ec-enable-ccs-for-eck.md b/deploy-manage/remote-clusters/ec-enable-ccs-for-eck.md index 1aaa6a8eb4..f3207d4521 100644 --- a/deploy-manage/remote-clusters/ec-enable-ccs-for-eck.md +++ b/deploy-manage/remote-clusters/ec-enable-ccs-for-eck.md @@ -5,7 +5,7 @@ mapped_pages: # Enabling CCS/R between Elasticsearch Service and ECK [ec-enable-ccs-for-eck] -These steps describe how to configure remote clusters between an {{es}} cluster in Elasticsearch Service and an {{es}} cluster running within [Elastic Cloud on Kubernetes (ECK)](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-overview.html). Once that’s done, you’ll be able to [run CCS queries from {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html) or [set up CCR](https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-getting-started-tutorial.html). +These steps describe how to configure remote clusters between an {{es}} cluster in Elasticsearch Service and an {{es}} cluster running within [Elastic Cloud on Kubernetes (ECK)](/deploy-manage/deploy/cloud-on-k8s.md).
Once that’s done, you’ll be able to [run CCS queries from {{es}}](/solutions/search/cross-cluster-search.md) or [set up CCR](/deploy-manage/tools/cross-cluster-replication/set-up-cross-cluster-replication.md). ## Establish trust between two clusters [ec_establish_trust_between_two_clusters] @@ -85,4 +85,4 @@ Configure the ECK cluster [using certificate based authentication](ec-remote-clu ### Elasticsearch Service cluster to ECK Cluster [ec_elasticsearch_service_cluster_to_eck_cluster] -Follow the steps outlined in the [ECK documentation](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-remote-clusters.html#k8s_configure_the_remote_cluster_connection_through_the_elasticsearch_rest_api). +Follow the steps outlined in the [ECK documentation](/deploy-manage/remote-clusters/eck-remote-clusters.md#k8s_configure_the_remote_cluster_connection_through_the_elasticsearch_rest_api). diff --git a/deploy-manage/remote-clusters/ec-enable-ccs.md b/deploy-manage/remote-clusters/ec-enable-ccs.md index f1ab4c063a..ceb1506f92 100644 --- a/deploy-manage/remote-clusters/ec-enable-ccs.md +++ b/deploy-manage/remote-clusters/ec-enable-ccs.md @@ -5,9 +5,9 @@ mapped_pages: # Enable cross-cluster search and cross-cluster replication [ec-enable-ccs] -[Cross-cluster search (CCS)](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html) allows you to configure multiple remote clusters across different locations and to enable federated search queries across all of the configured remote clusters. +[Cross-cluster search (CCS)](/solutions/search/cross-cluster-search.md) allows you to configure multiple remote clusters across different locations and to enable federated search queries across all of the configured remote clusters. -[Cross-cluster replication (CCR)](https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-ccr.html) allows you to replicate indices across multiple remote clusters regardless of where they’re located. This provides tremendous benefit in scenarios of disaster recovery or data locality. +[Cross-cluster replication (CCR)](/deploy-manage/tools/cross-cluster-replication.md) allows you to replicate indices across multiple remote clusters regardless of where they’re located. This provides tremendous benefit in scenarios of disaster recovery or data locality. These remote clusters could be: @@ -21,7 +21,7 @@ These remote clusters could be: To use CCS or CCR, your deployments must meet the following criteria: -* Local and remote clusters must be in compatible versions. Review the [{{es}} version compatibility](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters-cert.html#remote-clusters-prerequisites-cert) table. +* Local and remote clusters must be in compatible versions. Review the [{{es}} version compatibility](/deploy-manage/remote-clusters/remote-clusters-cert.md#remote-clusters-prerequisites-cert) table. The steps, information, and authentication method required to configure CCS and CCR can vary depending on where the clusters you want to use as remote are hosted. 
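For orientation, registering a remote cluster through the {{es}} API in proxy mode looks roughly like the following (a hedged sketch; the alias `my_remote` and the proxy address are illustrative placeholders, and the real values come from the remote deployment’s Security page):

```console
PUT _cluster/settings
{
  "persistent": {
    "cluster": {
      "remote": {
        "my_remote": {
          "mode": "proxy",
          "proxy_address": "my-deployment.example.com:9400"
        }
      }
    }
  }
}
```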
@@ -37,8 +37,8 @@ The steps, information, and authentication method required to configure CCS and * [From another deployment of your Elasticsearch Service organization](ec-remote-cluster-same-ess.md) * [From a deployment of another Elasticsearch Service organization](ec-remote-cluster-other-ess.md) - * [From an ECE deployment](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html) - * [From a self-managed cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html) + * [From an ECE deployment](/deploy-manage/remote-clusters/ece-enable-ccs.md) + * [From a self-managed cluster](/deploy-manage/remote-clusters/remote-clusters-self-managed.md) diff --git a/deploy-manage/remote-clusters/ec-migrate-ccs.md b/deploy-manage/remote-clusters/ec-migrate-ccs.md index f1f18d0009..a4dbf6183c 100644 --- a/deploy-manage/remote-clusters/ec-migrate-ccs.md +++ b/deploy-manage/remote-clusters/ec-migrate-ccs.md @@ -19,7 +19,7 @@ You can use a PUT request to update your deployment, changing both the deploymen 1. First, choose the new template you want to use and obtain its ID. This template ID can be obtained from the [Elasticsearch Service Console](https://cloud.elastic.co?page=docs&placement=docs-body) **Create Deployment** page by selecting **Equivalent API request** and inspecting the result for the field `deployment_template`. For example, we are going to use the "Storage optimized" deployment template, and in our GCP region the id is `gcp-storage-optimized-v5`. - You can also find the template in the [list of templates available for each region](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html). + You can also find the template in the [list of templates available for each region](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md). :::{image} ../../images/cloud-ec-migrate-deployment-template(2).png :alt: Deployment Template ID diff --git a/deploy-manage/remote-clusters/ec-remote-cluster-ece.md b/deploy-manage/remote-clusters/ec-remote-cluster-ece.md index 060216d95d..31851cdbff 100644 --- a/deploy-manage/remote-clusters/ec-remote-cluster-ece.md +++ b/deploy-manage/remote-clusters/ec-remote-cluster-ece.md @@ -36,7 +36,7 @@ A deployment can be configured to trust all or specific deployments in a remote 7. Provide a name for the trusted environment. That name will appear in the trust summary of your deployment’s Security page. 8. Select **Create trust** to complete the configuration. -9. Configure the corresponding deployments of the ECE environment to [trust this deployment](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html). You will only be able to connect 2 deployments successfully when both of them trust each other. +9. Configure the corresponding deployments of the ECE environment to [trust this deployment](/deploy-manage/remote-clusters/ece-enable-ccs.md). You will only be able to connect 2 deployments successfully when both of them trust each other. Note that the environment ID and cluster IDs must be entered fully and correctly. For security reasons, no verification of the IDs is possible. If cross-environment trust does not appear to be working, double-checking the IDs is a good place to start. @@ -212,7 +212,7 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API. 
::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the certificate information. For more information, refer to [Administering endpoints in {{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-administering-endpoints.html). + If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the certificate information. For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. @@ -258,7 +258,7 @@ This section only applies if you’re using TLS certificates as cross-cluster se :::: -When the cluster to be configured as a remote is above 6.7.0 and below 7.6.0, the remote cluster must be configured using the [sniff mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode) with the proxy field. For each remote cluster you need to pass the following fields: +When the cluster to be configured as a remote is above 6.7.0 and below 7.6.0, the remote cluster must be configured using the [sniff mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode) with the proxy field. For each remote cluster you need to pass the following fields: * **Proxy**: This value can be found on the **Security** page of the deployment you want to use as a remote under the name `Proxy Address`. Also, using the API, this can be obtained from the {{es}} resource info, concatenating the fields `metadata.endpoint` and `metadata.ports.transport_passthrough` using a semicolon. * **Seeds**: This field is an array that must contain only one value, which is the `server name` that can be found on the **Security** page of the {{es}} deployment you want to use as a remote concatenated with `:1`. Also, using the API, this can be obtained from the {{es}} resource info, concatenating the fields `metadata.endpoint` and `1` with a semicolon. @@ -317,7 +317,7 @@ curl -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC_AP Note the following when using the Elasticsearch Service RESTful API: 1. A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters. The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/ec-remote-cluster-other-ess.md b/deploy-manage/remote-clusters/ec-remote-cluster-other-ess.md index 2f918911c7..4892d367ac 100644 --- a/deploy-manage/remote-clusters/ec-remote-cluster-other-ess.md +++ b/deploy-manage/remote-clusters/ec-remote-cluster-other-ess.md @@ -148,7 +148,7 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API.
::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the certificate information. For more information, refer to [Administering endpoints in {{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-administering-endpoints.html). + If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the certificate information. For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. @@ -194,7 +194,7 @@ This section only applies if you’re using TLS certificates as cross-cluster se :::: -When the cluster to be configured as a remote is above 6.7.0 and below 7.6.0, the remote cluster must be configured using the [sniff mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode) with the proxy field. For each remote cluster you need to pass the following fields: +When the cluster to be configured as a remote is above 6.7.0 and below 7.6.0, the remote cluster must be configured using the [sniff mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode) with the proxy field. For each remote cluster you need to pass the following fields: * **Proxy**: This value can be found on the **Security** page of the deployment you want to use as a remote under the name `Proxy Address`. Also, using the API, this can be obtained from the {{es}} resource info, concatenating the fields `metadata.endpoint` and `metadata.ports.transport_passthrough` using a semicolon. * **Seeds**: This field is an array that must contain only one value, which is the `server name` that can be found on the **Security** page of the {{es}} deployment you want to use as a remote concatenated with `:1`. Also, using the API, this can be obtained from the {{es}} resource info, concatenating the fields `metadata.endpoint` and `1` with a semicolon. @@ -253,7 +253,7 @@ curl -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC_AP Note the following when using the Elasticsearch Service RESTful API: 1. A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters.
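As a small illustration of point 2 above (the alias `alias1` is a placeholder), a remote can be marked as skippable when disconnected by setting `skip_unavailable` on the local cluster:

```console
PUT _cluster/settings
{
  "persistent": {
    "cluster.remote.alias1.skip_unavailable": true
  }
}
```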
The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/ec-remote-cluster-same-ess.md b/deploy-manage/remote-clusters/ec-remote-cluster-same-ess.md index 7e0447ee2d..9abadc3bcb 100644 --- a/deploy-manage/remote-clusters/ec-remote-cluster-same-ess.md +++ b/deploy-manage/remote-clusters/ec-remote-cluster-same-ess.md @@ -24,7 +24,7 @@ TLS certificate By default, any deployment that you create trusts all other deployments in the same organization. You can change this behavior in the [Elasticsearch Service Console](https://cloud.elastic.co?page=docs&placement=docs-body) under **Features** > **Trust**, so that when a new deployment is created it does not automatically trust any other deployment. You can choose one of the following options: -* Trust all my deployments - All of your organization’s deployments created while this option is selected already trust each other. If you keep this option, that includes any deployments you’ll create in the future. You can directly jump to [Connect to the remote cluster](https://www.elastic.co/guide/en/cloud/current/ec-remote-cluster-same-ess.html#ec_connect_to_the_remote_cluster) to finalize the CCS or CCR configuration. +* Trust all my deployments - All of your organization’s deployments created while this option is selected already trust each other. If you keep this option, that includes any deployments you’ll create in the future. You can directly jump to [Connect to the remote cluster](/deploy-manage/remote-clusters/ec-remote-cluster-same-ess.md#ec_connect_to_the_remote_cluster) to finalize the CCS or CCR configuration. * Trust no deployment - New deployments won’t trust any other deployment when they are created. You can instead configure trust individually for each of them in their security settings, as described in the next section. :::{image} ../../images/cloud-ec-account-trust-management.png @@ -54,7 +54,7 @@ If your organization’s deployments already trust each other by default, you ca ::::{note} -When trusting specific deployments, the more restrictive [CCS](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode) version policy is used (even if you only want to use [CCR](https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-ccr.html)). To work around this restriction for CCR-only trust, it is necessary to use the API as described below. +When trusting specific deployments, the more restrictive [CCS](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode) version policy is used (even if you only want to use [CCR](/deploy-manage/tools/cross-cluster-replication.md)). To work around this restriction for CCR-only trust, it is necessary to use the API as described below. :::: @@ -185,7 +185,7 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API. ::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the certificate information.
For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. @@ -290,7 +290,7 @@ curl -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC_AP Note the following when using the Elasticsearch Service RESTful API: 1. A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters. The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/ec-remote-cluster-self-managed.md b/deploy-manage/remote-clusters/ec-remote-cluster-self-managed.md index c9a1fe0508..9079c44b72 100644 --- a/deploy-manage/remote-clusters/ec-remote-cluster-self-managed.md +++ b/deploy-manage/remote-clusters/ec-remote-cluster-self-managed.md @@ -41,7 +41,7 @@ A deployment can be configured to trust all or specific deployments in any envir 7. Configure the self-managed cluster to trust this deployment, so that both deployments are configured to trust each other: * Download the Certificate Authority used to sign the certificates of your deployment nodes (it can be found in the Security page of your deployment) - * Trust this CA either using the [setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html) `xpack.security.transport.ssl.certificate_authorities` in `elasticsearch.yml` or by [adding it to the trust store](../security/different-ca.md). + * Trust this CA either using the [setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md) `xpack.security.transport.ssl.certificate_authorities` in `elasticsearch.yml` or by [adding it to the trust store](../security/different-ca.md). 8. Generate certificates with an `otherName` attribute using the Elasticsearch certutil. Create a file called `instances.yaml` with all the details of the nodes in your on-premise cluster like below. The `dns` and `ip` settings are optional, but `cn` is mandatory for use with the `trust_restrictions` path setting in the next step. Next, run `./bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 -in instances.yaml` to create new certificates for all the nodes at once. You can then copy the resulting files into each node. @@ -239,7 +239,7 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API. ::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the the certificate information. For more information, refer to [Administering endpoints in {{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-administering-endpoints.html). 
+ If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match the certificate information. For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. @@ -285,7 +285,7 @@ This section only applies if you’re using TLS certificates as cross-cluster se :::: -When the cluster to be configured as a remote is above 6.7.0 and below 7.6.0, the remote cluster must be configured using the [sniff mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode) with the proxy field. For each remote cluster you need to pass the following fields: +When the cluster to be configured as a remote is above 6.7.0 and below 7.6.0, the remote cluster must be configured using the [sniff mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode) with the proxy field. For each remote cluster you need to pass the following fields: * **Proxy**: This value can be found on the **Security** page of the deployment you want to use as a remote under the name `Proxy Address`. Also, using the API, this can be obtained from the elasticsearch resource info, concatenating the fields `metadata.endpoint` and `metadata.ports.transport_passthrough` using a colon. * **Seeds**: This field is an array that must contain only one value, which is the `server name` that can be found on the **Security** page of the {{es}} deployment you want to use as a remote concatenated with `:1`. Also, using the API, this can be obtained from the {{es}} resource info, concatenating the fields `metadata.endpoint` and `1` with a colon. @@ -344,7 +344,7 @@ curl -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC_AP Note the following when using the Elasticsearch Service RESTful API: 1. A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters. The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/ece-enable-ccs-for-eck.md b/deploy-manage/remote-clusters/ece-enable-ccs-for-eck.md index 23b05068ba..70cfae6ecf 100644 --- a/deploy-manage/remote-clusters/ece-enable-ccs-for-eck.md +++ b/deploy-manage/remote-clusters/ece-enable-ccs-for-eck.md @@ -5,7 +5,7 @@ mapped_pages: # Enabling CCS/R between Elastic Cloud Enterprise and ECK [ece-enable-ccs-for-eck] -These steps describe how to configure remote clusters between an {{es}} cluster in Elastic Cloud Enterprise and an {{es}} cluster running within [Elastic Cloud on Kubernetes (ECK)](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-overview.html).
Once that’s done, you’ll be able to [run CCS queries from {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html) or [set up CCR](https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-getting-started-tutorial.html). +These steps describe how to configure remote clusters between an {{es}} cluster in Elastic Cloud Enterprise and an {{es}} cluster running within [Elastic Cloud on Kubernetes (ECK)](/deploy-manage/deploy/cloud-on-k8s.md). Once that’s done, you’ll be able to [run CCS queries from {{es}}](/solutions/search/cross-cluster-search.md) or [set up CCR](/deploy-manage/tools/cross-cluster-replication/set-up-cross-cluster-replication.md). ## Establish trust between two clusters [ece_establish_trust_between_two_clusters] @@ -85,4 +85,4 @@ Configure the ECK cluster [using certificate based authentication](ece-remote-cl ### Elastic Cloud Enterprise cluster to ECK Cluster [ece_elastic_cloud_enterprise_cluster_to_eck_cluster] -Follow the steps outlined in the [ECK documentation](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-remote-clusters.html#k8s_configure_the_remote_cluster_connection_through_the_elasticsearch_rest_api). +Follow the steps outlined in the [ECK documentation](/deploy-manage/remote-clusters/eck-remote-clusters.md#k8s_configure_the_remote_cluster_connection_through_the_elasticsearch_rest_api). diff --git a/deploy-manage/remote-clusters/ece-enable-ccs.md b/deploy-manage/remote-clusters/ece-enable-ccs.md index d1a854edfd..a11864d98f 100644 --- a/deploy-manage/remote-clusters/ece-enable-ccs.md +++ b/deploy-manage/remote-clusters/ece-enable-ccs.md @@ -5,9 +5,9 @@ mapped_pages: # Enable cross-cluster search and cross-cluster replication [ece-enable-ccs] -[Cross-cluster search (CCS)](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html) allows you to configure multiple remote clusters across different locations and to enable federated search queries across all of the configured remote clusters. +[Cross-cluster search (CCS)](/solutions/search/cross-cluster-search.md) allows you to configure multiple remote clusters across different locations and to enable federated search queries across all of the configured remote clusters. -[Cross-cluster replication (CCR)](https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-ccr.html) allows you to replicate indices across multiple remote clusters regardless of where they’re located. This provides tremendous benefit in scenarios of disaster recovery or data locality. +[Cross-cluster replication (CCR)](/deploy-manage/tools/cross-cluster-replication.md) allows you to replicate indices across multiple remote clusters regardless of where they’re located. This provides tremendous benefit in scenarios of disaster recovery or data locality. These remote clusters could be: @@ -21,7 +21,7 @@ These remote clusters could be: To use CCS or CCR, your environment must meet the following criteria: -* Local and remote clusters must be in compatible versions. Review the [{{es}} version compatibility](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters-cert.html#remote-clusters-prerequisites-cert) table. +* Local and remote clusters must be in compatible versions. Review the [{{es}} version compatibility](/deploy-manage/remote-clusters/remote-clusters-cert.md#remote-clusters-prerequisites-cert) table. * System deployments cannot be used as remote clusters or have remote clusters. 
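As a hypothetical illustration of what CCS enables once a remote cluster is connected, a single search can span local and remote indices. The alias `my_remote` and the index pattern below are placeholders:

```console
GET /logs-*,my_remote:logs-*/_search
{
  "query": {
    "match": { "message": "error" }
  }
}
```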
@@ -42,8 +42,8 @@ The steps, information, and authentication method required to configure CCS and * [From another deployment of the same Elastic Cloud Enterprise environment](ece-remote-cluster-same-ece.md) * [From a deployment of another Elastic Cloud Enterprise environment](ece-remote-cluster-other-ece.md) - * [From an {{ess}} deployment](https://www.elastic.co/guide/en/cloud/current/ec-remote-cluster-ece.html) - * [From a self-managed cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html) + * [From an {{ess}} deployment](/deploy-manage/remote-clusters/ec-remote-cluster-ece.md) + * [From a self-managed cluster](/deploy-manage/remote-clusters/remote-clusters-self-managed.md) diff --git a/deploy-manage/remote-clusters/ece-migrate-ccs.md b/deploy-manage/remote-clusters/ece-migrate-ccs.md index 52aa629a74..1b788c0a72 100644 --- a/deploy-manage/remote-clusters/ece-migrate-ccs.md +++ b/deploy-manage/remote-clusters/ece-migrate-ccs.md @@ -23,5 +23,5 @@ You can make this change in the user Cloud UI. The only drawback of this method :class: screenshot ::: -4. Finally, [configure the remote clusters](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-remote-cluster-other-ece.html). +4. Finally, [configure the remote clusters](/deploy-manage/remote-clusters/ece-remote-cluster-other-ece.md). diff --git a/deploy-manage/remote-clusters/ece-remote-cluster-ece-ess.md b/deploy-manage/remote-clusters/ece-remote-cluster-ece-ess.md index c380a786d2..cd1154aba9 100644 --- a/deploy-manage/remote-clusters/ece-remote-cluster-ece-ess.md +++ b/deploy-manage/remote-clusters/ece-remote-cluster-ece-ess.md @@ -33,7 +33,7 @@ A deployment can be configured to trust all or specific deployments from an orga * All deployments - This deployment trusts all deployments in the organization in the regions whose certificate authorities have been uploaded, including new deployments when they are created. * Specific deployments - Specify which of the existing deployments you want to trust from this organization. The full Elasticsearch cluster ID must be entered for each remote cluster. The Elasticsearch `Cluster ID` can be found in the deployment overview page under **Applications**. -5. Configure the deployment in {{ecloud}} to [trust this deployment](https://www.elastic.co/guide/en/cloud/current/ec-remote-cluster-ece.html#ec-trust-ece), so that both deployments are configured to trust each other. +5. Configure the deployment in {{ecloud}} to [trust this deployment](/deploy-manage/remote-clusters/ec-remote-cluster-ece.md#ec-trust-ece), so that both deployments are configured to trust each other. Note that the organization ID and cluster IDs must be entered fully and correctly. For security reasons, no verification of the IDs is possible. If cross-environment trust does not appear to be working, double-checking the IDs is a good place to start. @@ -154,14 +154,14 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API. ::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the the certificate information. For more information, refer to [Administering endpoints in {{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-administering-endpoints.html). 
+ If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match the certificate information. For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. 5. Click **Add remote cluster** (you have already established trust in a previous step). ::::{note} -This configuration of remote clusters uses the [Proxy mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#proxy-mode) and it requires that the allocators can communicate via http with the proxies. +This configuration of remote clusters uses the [Proxy mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#proxy-mode) and it requires that the allocators can communicate via HTTP with the proxies. :::: @@ -264,7 +264,7 @@ curl -k -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC Note the following when using the Elastic Cloud Enterprise RESTful API: 1. A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters. The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/ece-remote-cluster-other-ece.md b/deploy-manage/remote-clusters/ece-remote-cluster-other-ece.md index e13b3fce35..4b7953bce2 100644 --- a/deploy-manage/remote-clusters/ece-remote-cluster-other-ece.md +++ b/deploy-manage/remote-clusters/ece-remote-cluster-other-ece.md @@ -44,7 +44,7 @@ Now, deployments in those environments will be able to configure trust with depl * Specific deployments - Specify which of the existing deployments you want to trust in the ECE environment. The full Elasticsearch cluster ID must be entered for each remote cluster. The Elasticsearch `Cluster ID` can be found in the deployment overview page under **Applications**. 6. Select **Create trust** to complete the configuration. -7. Configure the corresponding deployments of the ECE environment to [trust this deployment](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html). You will only be able to connect 2 deployments successfully when both of them trust each other. +7. Configure the corresponding deployments of the ECE environment to [trust this deployment](/deploy-manage/remote-clusters/ece-enable-ccs.md). You will only be able to connect two deployments successfully when both of them trust each other. Note that the environment ID and cluster IDs must be entered fully and correctly. For security reasons, no verification of the IDs is possible. If cross-environment trust does not appear to be working, double-checking the IDs is a good place to start. @@ -232,14 +232,14 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API.
::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the the certificate information. For more information, refer to [Administering endpoints in {{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-administering-endpoints.html). + If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match the certificate information. For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. 5. Click **Add remote cluster** (you have already established trust in a previous step). ::::{note} -This configuration of remote clusters uses the [Proxy mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#proxy-mode) and it requires that the allocators can communicate via http with the proxies. +This configuration of remote clusters uses the [Proxy mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#proxy-mode) and it requires that the allocators can communicate via HTTP with the proxies. :::: @@ -342,7 +342,7 @@ curl -k -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC Note the following when using the Elastic Cloud Enterprise RESTful API: 1. A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters. The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/ece-remote-cluster-same-ece.md b/deploy-manage/remote-clusters/ece-remote-cluster-same-ece.md index 3c434e9918..e1170c59a6 100644 --- a/deploy-manage/remote-clusters/ece-remote-cluster-same-ece.md +++ b/deploy-manage/remote-clusters/ece-remote-cluster-same-ece.md @@ -25,7 +25,7 @@ TLS certificate By default, any deployment that you or your users create trusts all other deployments in the same Elastic Cloud Enterprise environment. You can change this behavior in the Cloud UI under **Platform** > **Trust Management**, so that when a new deployment is created it does not automatically trust any other deployment. You can choose one of the following options: -* Trust all my deployments - All of your organization’s deployments created while this option is selected already trust each other. If you keep this option, that includes any deployments you’ll create in the future. You can directly jump to [Connect to the remote cluster](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-remote-cluster-same-ece.html#ece_connect_to_the_remote_cluster) to finalize the CCS or CCR configuration.
+* Trust all my deployments - All of your organization’s deployments created while this option is selected already trust each other. If you keep this option, that includes any deployments you’ll create in the future. You can directly jump to [Connect to the remote cluster](/deploy-manage/remote-clusters/ece-remote-cluster-same-ece.md#ece_connect_to_the_remote_cluster) to finalize the CCS or CCR configuration. * Trust no deployment - New deployments won’t trust any other deployment when they are created. You can instead configure trust individually for each of them in their security settings, as described in the next section. :::{image} ../../images/cloud-enterprise-ce-environment-trust-management.png @@ -55,7 +55,7 @@ If your organization’s deployments already trust each other by default, you ca ::::{note} -When trusting specific deployments, the more restrictive [CCS](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode) version policy is used (even if you only want to use [CCR](https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-ccr.html)). To work around this restriction for CCR-only trust, it is necessary to use the API as described below. +When trusting specific deployments, the more restrictive [CCS](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode) version policy is used (even if you only want to use [CCR](/deploy-manage/tools/cross-cluster-replication.md)). To work around this restriction for CCR-only trust, it is necessary to use the API as described below. :::: @@ -184,7 +184,7 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API. ::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the the certificate information. For more information, refer to [Administering endpoints in {{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-administering-endpoints.html). + If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match the certificate information. For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. @@ -294,7 +294,7 @@ curl -k -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC Note the following when using the Elastic Cloud Enterprise RESTful API: 1. A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters.
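As a sketch of point 2 above, skipping a disconnected cluster is controlled per remote with the `skip_unavailable` setting; the alias `my_remote` is a placeholder:

```console
PUT /_cluster/settings
{
  "persistent": {
    "cluster.remote.my_remote.skip_unavailable": true
  }
}
```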
The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/ece-remote-cluster-self-managed.md b/deploy-manage/remote-clusters/ece-remote-cluster-self-managed.md index c659f85019..aadddf5ab4 100644 --- a/deploy-manage/remote-clusters/ece-remote-cluster-self-managed.md +++ b/deploy-manage/remote-clusters/ece-remote-cluster-self-managed.md @@ -42,7 +42,7 @@ A deployment can be configured to trust all or specific deployments in any envir 7. Configure the self-managed cluster to trust this deployment, so that both deployments are configured to trust each other: * Download the Certificate Authority used to sign the certificates of your deployment nodes (it can be found in the Security page of your deployment) - * Trust this CA either using the [setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html) `xpack.security.transport.ssl.certificate_authorities` in `elasticsearch.yml` or by [adding it to the trust store](../security/different-ca.md). + * Trust this CA either using the [setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md) `xpack.security.transport.ssl.certificate_authorities` in `elasticsearch.yml` or by [adding it to the trust store](../security/different-ca.md). 8. Generate certificates with an `otherName` attribute using the Elasticsearch certutil. Create a file called `instances.yaml` with all the details of the nodes in your on-premise cluster like below. The `dns` and `ip` settings are optional, but `cn` is mandatory for use with the `trust_restrictions` path setting in the next step. Next, run `./bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 -in instances.yaml` to create new certificates for all the nodes at once. You can then copy the resulting files into each node. @@ -237,14 +237,14 @@ On the local cluster, add the remote cluster using Kibana or the {{es}} API. ::: ::::{note} - If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match with the the certificate information. For more information, refer to [Administering endpoints in {{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-administering-endpoints.html). + If you’re having issues establishing the connection and the remote cluster is part of an {{ece}} environment with a private certificate, make sure that the proxy address and server name match the certificate information. For more information, refer to [Administering endpoints in {{ece}}](/deploy-manage/deploy/cloud-enterprise/change-endpoint-urls.md). :::: 4. Click **Next**. 5. Click **Add remote cluster** (you have already established trust in a previous step). ::::{note} -This configuration of remote clusters uses the [Proxy mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#proxy-mode) and it requires that the allocators can communicate via http with the proxies. +This configuration of remote clusters uses the [Proxy mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#proxy-mode) and it requires that the allocators can communicate via HTTP with the proxies. :::: @@ -347,7 +347,7 @@ curl -k -H 'Content-Type: application/json' -X PUT -H "Authorization: ApiKey $EC Note the following when using the Elastic Cloud Enterprise RESTful API: 1.
A cluster alias must contain only letters, numbers, dashes (-), or underscores (_). -2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html#skip-unavailable-clusters). +2. To learn about skipping disconnected clusters, refer to the [{{es}} documentation](/solutions/search/cross-cluster-search.md#skip-unavailable-clusters). 3. When remote clusters are already configured for a deployment, the `PUT` request replaces the existing configuration with the new configuration passed. Passing an empty array of resources will remove all remote clusters. The following API request retrieves the remote clusters configuration: diff --git a/deploy-manage/remote-clusters/eck-remote-clusters.md b/deploy-manage/remote-clusters/eck-remote-clusters.md index b971765b7a..e45fa9e84f 100644 --- a/deploy-manage/remote-clusters/eck-remote-clusters.md +++ b/deploy-manage/remote-clusters/eck-remote-clusters.md @@ -5,7 +5,7 @@ mapped_pages: # ECK remote clusters [k8s-remote-clusters] -The [remote clusters module](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html) in Elasticsearch enables you to establish uni-directional connections to a remote cluster. This functionality is used in cross-cluster replication and cross-cluster search. +The [remote clusters module](/deploy-manage/remote-clusters/remote-clusters-self-managed.md) in Elasticsearch enables you to establish uni-directional connections to a remote cluster. This functionality is used in cross-cluster replication and cross-cluster search. When using remote cluster connections with ECK, the setup process depends on where the remote cluster is deployed. @@ -77,7 +77,7 @@ spec: version: 8.16.1 ``` -1. This requires the sample data: [https://www.elastic.co/guide/en/kibana/current/get-started.html#gs-get-data-into-kibana](https://www.elastic.co/guide/en/kibana/current/get-started.html#gs-get-data-into-kibana) +1. This requires the sample data: [/explore-analyze/index.md#gs-get-data-into-kibana](/explore-analyze/index.md#gs-get-data-into-kibana) You can find a complete example in the [recipes directory](https://github.com/elastic/cloud-on-k8s/tree/2.16/config/recipes/remoteclusters). @@ -138,7 +138,7 @@ kubectl get secret cluster-one-es-transport-certs-public \ -o go-template='{{index .data "ca.crt" | base64decode}}' > remote.ca.crt ``` -You then need to configure the CA as one of the trusted CAs in `cluster-two`. If that cluster is hosted outside of Kubernetes, take the CA certificate that you have just extracted and add it to the list of CAs in [`xpack.security.transport.ssl.certificate_authorities`](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#_pem_encoded_files_3). +You then need to configure the CA as one of the trusted CAs in `cluster-two`. If that cluster is hosted outside of Kubernetes, take the CA certificate that you have just extracted and add it to the list of CAs in [`xpack.security.transport.ssl.certificate_authorities`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#_pem_encoded_files_3). ::::{note} Beware of copying the source Secret as-is into a different namespace. Check [Common Problems: Owner References](../../troubleshoot/deployments/cloud-on-k8s/common-problems.md#k8s-common-problems-owner-refs) for more information.
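For the `cluster-two` scenario above, trusting the extracted CA on a cluster hosted outside of Kubernetes might look like the following `elasticsearch.yml` sketch; the file path is illustrative:

```yaml
# Add the CA extracted from cluster-one to the transport trust configuration.
xpack.security.transport.ssl.certificate_authorities:
  - /etc/elasticsearch/certs/remote.ca.crt
```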
diff --git a/deploy-manage/remote-clusters/remote-clusters-api-key.md b/deploy-manage/remote-clusters/remote-clusters-api-key.md index 251774bae0..2ad07a9520 100644 --- a/deploy-manage/remote-clusters/remote-clusters-api-key.md +++ b/deploy-manage/remote-clusters/remote-clusters-api-key.md @@ -11,7 +11,7 @@ All cross-cluster requests from the local cluster are bound by the API key’s p On the local cluster side, not every local user needs to access every piece of data allowed by the API key. An administrator of the local cluster can further configure additional permission constraints on local users so each user only gets access to the necessary remote data. Note it is only possible to further reduce the permissions allowed by the API key for individual local users. It is impossible to increase the permissions to go beyond what is allowed by the API key. -In this model, cross-cluster operations use [a dedicated server port](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#remote_cluster.port) (remote cluster interface) for communication between clusters. A remote cluster must enable this port for local clusters to connect. Configure Transport Layer Security (TLS) for this port to maximize security (as explained in [Establish trust with a remote cluster](#remote-clusters-security-api-key)). +In this model, cross-cluster operations use [a dedicated server port](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#remote_cluster.port) (remote cluster interface) for communication between clusters. A remote cluster must enable this port for local clusters to connect. Configure Transport Layer Security (TLS) for this port to maximize security (as explained in [Establish trust with a remote cluster](#remote-clusters-security-api-key)). The local cluster must trust the remote cluster on the remote cluster interface. This means that the local cluster trusts the remote cluster’s certificate authority (CA) that signs the server certificate used by the remote cluster interface. When establishing a connection, all nodes from the local cluster that participate in cross-cluster communication verify certificates from nodes on the other side, based on the TLS trust configuration. @@ -26,7 +26,7 @@ If you run into any issues, refer to [Troubleshooting](remote-clusters-troublesh ## Prerequisites [remote-clusters-prerequisites-api-key] -* The {{es}} security features need to be enabled on both clusters, on every node. Security is enabled by default. If it’s disabled, set `xpack.security.enabled` to `true` in `elasticsearch.yml`. Refer to [General security settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#general-security-settings). +* The {{es}} security features need to be enabled on both clusters, on every node. Security is enabled by default. If it’s disabled, set `xpack.security.enabled` to `true` in `elasticsearch.yml`. Refer to [General security settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#general-security-settings). * The nodes of the local and remote clusters must be on version 8.10 or later. * The local and remote clusters must have an appropriate license. For more information, refer to [https://www.elastic.co/subscriptions](https://www.elastic.co/subscriptions). @@ -78,7 +78,7 @@ If a remote cluster is part of an {{ess}} deployment, it has a valid certificate 4. 
If the remote cluster has multiple nodes, you can either: * create a single wildcard certificate for all nodes; - * or, create separate certificates for each node either manually or in batch with the [silent mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/certutil.html#certutil-silent). + * or, create separate certificates for each node either manually or in batch with the [silent mode](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/certutil.md#certutil-silent). 3. On every node of the remote cluster: @@ -205,7 +205,7 @@ Use the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasti The `seeds` parameter specifies the hostname and [remote cluster port](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html) (default `9443`) of a seed node in the remote cluster. -The `mode` parameter determines the configured connection mode, which defaults to [`sniff`](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode). Because `cluster_one` doesn’t specify a `mode`, it uses the default. Both `cluster_two` and `cluster_three` explicitly use different modes. +The `mode` parameter determines the configured connection mode, which defaults to [`sniff`](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode). Because `cluster_one` doesn’t specify a `mode`, it uses the default. Both `cluster_two` and `cluster_three` explicitly use different modes. ```console PUT _cluster/settings diff --git a/deploy-manage/remote-clusters/remote-clusters-cert.md b/deploy-manage/remote-clusters/remote-clusters-cert.md index 40008ac9a4..d3e210edd5 100644 --- a/deploy-manage/remote-clusters/remote-clusters-cert.md +++ b/deploy-manage/remote-clusters/remote-clusters-cert.md @@ -23,7 +23,7 @@ If you run into any issues, refer to [Troubleshooting](remote-clusters-troublesh ## Prerequisites [remote-clusters-prerequisites-cert] -1. The {{es}} security features need to be enabled on both clusters, on every node. Security is enabled by default. If it’s disabled, set `xpack.security.enabled` to `true` in `elasticsearch.yml`. Refer to [General security settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#general-security-settings). +1. The {{es}} security features need to be enabled on both clusters, on every node. Security is enabled by default. If it’s disabled, set `xpack.security.enabled` to `true` in `elasticsearch.yml`. Refer to [General security settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#general-security-settings). 2. The local and remote clusters versions must be compatible. * Any node can communicate with another node on the same major version. For example, 7.0 can talk to any 7.x node. @@ -84,7 +84,7 @@ You must have the `manage` cluster privilege to connect remote clusters. :::: -The local cluster uses the [transport interface](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html) to establish communication with remote clusters. The coordinating nodes in the local cluster establish [long-lived](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#long-lived-connections) TCP connections with specific nodes in the remote cluster. {{es}} requires these connections to remain open, even if the connections are idle for an extended period. 
+The local cluster uses the [transport interface](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md) to establish communication with remote clusters. The coordinating nodes in the local cluster establish [long-lived](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#long-lived-connections) TCP connections with specific nodes in the remote cluster. {{es}} requires these connections to remain open, even if the connections are idle for an extended period. To add a remote cluster from Stack Management in {{kib}}: @@ -151,7 +151,7 @@ Use the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasti The `seeds` parameter specifies the hostname and [transport port](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html) (default `9300`) of a seed node in the remote cluster. -The `mode` parameter determines the configured connection mode, which defaults to [`sniff`](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode). Because `cluster_one` doesn’t specify a `mode`, it uses the default. Both `cluster_two` and `cluster_three` explicitly use different modes. +The `mode` parameter determines the configured connection mode, which defaults to [`sniff`](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode). Because `cluster_one` doesn’t specify a `mode`, it uses the default. Both `cluster_two` and `cluster_three` explicitly use different modes. ```console PUT _cluster/settings diff --git a/deploy-manage/remote-clusters/remote-clusters-migrate.md b/deploy-manage/remote-clusters/remote-clusters-migrate.md index 3bf205d98d..7c887b439f 100644 --- a/deploy-manage/remote-clusters/remote-clusters-migrate.md +++ b/deploy-manage/remote-clusters/remote-clusters-migrate.md @@ -39,7 +39,7 @@ On the remote cluster: 1. Enable the remote cluster server on every node of the remote cluster. In `elasticsearch.yml`: - 1. Set [`remote_cluster_server.enabled`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#remote-cluster-network-settings) to `true`. + 1. Set [`remote_cluster_server.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#remote-cluster-network-settings) to `true`. 2. Configure the bind and publish address for remote cluster server traffic, for example using [`remote_cluster.host`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#remote-cluster-network-settings). Without configuring the address, remote cluster traffic may be bound to the local interface, and remote clusters running on other machines can’t connect. 3. Optionally, configure the remote server port using [`remote_cluster.port`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#remote_cluster.port) (defaults to `9443`). @@ -75,7 +75,7 @@ On the remote cluster: 4. If the remote cluster has multiple nodes, you can either: * create a single wildcard certificate for all nodes; - * or, create separate certificates for each node either manually or in batch with the [silent mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/certutil.html#certutil-silent). 
+ * or, create separate certificates for each node either manually or in batch with the [silent mode](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/certutil.md#certutil-silent). 3. On every node of the remote cluster: diff --git a/deploy-manage/remote-clusters/remote-clusters-self-managed.md b/deploy-manage/remote-clusters/remote-clusters-self-managed.md index 26b293f965..0e952b2d0c 100644 --- a/deploy-manage/remote-clusters/remote-clusters-self-managed.md +++ b/deploy-manage/remote-clusters/remote-clusters-self-managed.md @@ -10,18 +10,18 @@ You can connect a local cluster to other {{es}} clusters, known as *remote clust ## {{ccr-cap}} [remote-clusters-ccr] -With [{{ccr}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-ccr.html), you ingest data to an index on a remote cluster. This *leader* index is replicated to one or more read-only *follower* indices on your local cluster. Creating a multi-cluster architecture with {{ccr}} enables you to configure disaster recovery, bring data closer to your users, or establish a centralized reporting cluster to process reports locally. +With [{{ccr}}](/deploy-manage/tools/cross-cluster-replication.md), you ingest data to an index on a remote cluster. This *leader* index is replicated to one or more read-only *follower* indices on your local cluster. Creating a multi-cluster architecture with {{ccr}} enables you to configure disaster recovery, bring data closer to your users, or establish a centralized reporting cluster to process reports locally. ## {{ccs-cap}} [remote-clusters-ccs] -[{{ccs-cap}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cross-cluster-search.html) enables you to run a search request against one or more remote clusters. This capability provides each region with a global view of all clusters, allowing you to send a search request from a local cluster and return results from all connected remote clusters. For full {{ccs}} capabilities, the local and remote cluster must be on the same [subscription level](https://www.elastic.co/subscriptions). +[{{ccs-cap}}](/solutions/search/cross-cluster-search.md) enables you to run a search request against one or more remote clusters. This capability provides each region with a global view of all clusters, allowing you to send a search request from a local cluster and return results from all connected remote clusters. For full {{ccs}} capabilities, the local and remote cluster must be on the same [subscription level](https://www.elastic.co/subscriptions). ## Add remote clusters [add-remote-clusters] ::::{note} -The instructions that follow describe how to create a remote connection from a self-managed cluster. You can also set up {{ccs}} and {{ccr}} from an [{{ess}} deployment](https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html) or from an [{{ece}} deployment](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html). +The instructions that follow describe how to create a remote connection from a self-managed cluster. You can also set up {{ccs}} and {{ccr}} from an [{{ess}} deployment](/deploy-manage/remote-clusters/ec-enable-ccs.md) or from an [{{ece}} deployment](/deploy-manage/remote-clusters/ece-enable-ccs.md). :::: @@ -57,7 +57,7 @@ Sniff mode The *gateway nodes* selection depends on the following criteria: * **version**: Remote nodes must be compatible with the cluster they are registered to. 
- * **role**: By default, any non-[master-eligible](https://www.elastic.co/guide/en/elasticsearch/reference/current/node-roles-overview.html#master-node-role) node can act as a gateway node. Dedicated master nodes are never selected as gateway nodes. + * **role**: By default, any non-[master-eligible](/deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md#master-node-role) node can act as a gateway node. Dedicated master nodes are never selected as gateway nodes. * **attributes**: You can define the gateway nodes for a cluster by setting [`cluster.remote.node.attr.gateway`](remote-clusters-settings.md#cluster-remote-node-attr) to `true`. However, such nodes still have to satisfy the two above requirements. diff --git a/deploy-manage/remote-clusters/remote-clusters-settings.md b/deploy-manage/remote-clusters/remote-clusters-settings.md index a6b31c5d27..7c2ea8a89e 100644 --- a/deploy-manage/remote-clusters/remote-clusters-settings.md +++ b/deploy-manage/remote-clusters/remote-clusters-settings.md @@ -5,7 +5,7 @@ mapped_pages: # Remote cluster settings [remote-clusters-settings] -The following settings apply to both [sniff mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode) and [proxy mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#proxy-mode). Settings that are specific to sniff mode and proxy mode are described separately. +The following settings apply to both [sniff mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode) and [proxy mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#proxy-mode). Settings that are specific to sniff mode and proxy mode are described separately. `cluster.remote.<cluster_alias>.mode` : The mode used for a remote cluster connection. The only supported modes are `sniff` and `proxy`. The default is `sniff`. See [Connection modes](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-proxy-modes) for further information about these modes, and [Sniff mode remote cluster settings](#remote-cluster-sniff-settings) and [Proxy mode remote cluster settings](#remote-cluster-proxy-settings) for further information about their settings. @@ -13,8 +13,8 @@ The following settings apply to both [sniff mode](https://www.elastic.co/guide/e `cluster.remote.initial_connect_timeout` : The time to wait for remote connections to be established when the node starts. The default is `30s`. -`remote_cluster_client` [role](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) -: By default, any node in the cluster can act as a cross-cluster client and connect to remote clusters. To prevent a node from connecting to remote clusters, specify the [node.roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) setting in `elasticsearch.yml` and exclude `remote_cluster_client` from the listed roles. Search requests targeting remote clusters must be sent to a node that is allowed to act as a cross-cluster client. Other features such as {{ml}} [data feeds](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html#general-ml-settings), [transforms](https://www.elastic.co/guide/en/elasticsearch/reference/current/transform-settings.html#general-transform-settings), and [{{ccr}}](../tools/cross-cluster-replication/set-up-cross-cluster-replication.md) require the `remote_cluster_client` role.
+`remote_cluster_client` [role](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) +: By default, any node in the cluster can act as a cross-cluster client and connect to remote clusters. To prevent a node from connecting to remote clusters, specify the [node.roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) setting in `elasticsearch.yml` and exclude `remote_cluster_client` from the listed roles. Search requests targeting remote clusters must be sent to a node that is allowed to act as a cross-cluster client. Other features such as {{ml}} [data feeds](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md#general-ml-settings), [transforms](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/transforms-settings.md#general-transform-settings), and [{{ccr}}](../tools/cross-cluster-replication/set-up-cross-cluster-replication.md) require the `remote_cluster_client` role. `cluster.remote.<cluster_alias>.skip_unavailable` : Per cluster boolean setting that allows skipping specific clusters when no nodes belonging to them are available and they are the target of a remote cluster request. @@ -25,7 +25,7 @@ In Elasticsearch 8.15, the default value for `skip_unavailable` was changed from `cluster.remote.<cluster_alias>.transport.ping_schedule` -: Sets the time interval between regular application-level ping messages that are sent to try and keep remote cluster connections alive. If set to `-1`, application-level ping messages to this remote cluster are not sent. If unset, application-level ping messages are sent according to the global `transport.ping_schedule` setting, which defaults to `-1` meaning that pings are not sent. It is preferable to correctly configure TCP keep-alives instead of configuring a `ping_schedule`, because TCP keep-alives are handled by the operating system and not by {{es}}. By default {{es}} enables TCP keep-alives on remote cluster connections. Remote cluster connections are transport connections so the `transport.tcp.*` [advanced settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) regarding TCP keep-alives apply to them. +: Sets the time interval between regular application-level ping messages that are sent to try and keep remote cluster connections alive. If set to `-1`, application-level ping messages to this remote cluster are not sent. If unset, application-level ping messages are sent according to the global `transport.ping_schedule` setting, which defaults to `-1` meaning that pings are not sent. It is preferable to correctly configure TCP keep-alives instead of configuring a `ping_schedule`, because TCP keep-alives are handled by the operating system and not by {{es}}. By default {{es}} enables TCP keep-alives on remote cluster connections. Remote cluster connections are transport connections so the `transport.tcp.*` [advanced settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#transport-settings) regarding TCP keep-alives apply to them. `cluster.remote.<cluster_alias>.transport.compress` : Per-cluster setting that enables you to configure compression for requests to a specific remote cluster. The handling cluster will automatically compress responses to compressed requests. The setting options are `true`, `indexing_data`, and `false`.
If unset, defaults to the behaviour specified by the node-wide `transport.compress` setting. See the [documentation for the `transport.compress` setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings-compress) for further information. diff --git a/deploy-manage/remote-clusters/remote-clusters-troubleshooting.md b/deploy-manage/remote-clusters/remote-clusters-troubleshooting.md index 5fe48aee9e..8985932c38 100644 --- a/deploy-manage/remote-clusters/remote-clusters-troubleshooting.md +++ b/deploy-manage/remote-clusters/remote-clusters-troubleshooting.md @@ -49,7 +49,7 @@ The API should return `"connected" : true`. When using [API key authentication]( When using API key authentication, cross-cluster traffic happens on the remote cluster interface, instead of the transport interface. The remote cluster interface is not enabled by default. This means a node is not ready to accept incoming cross-cluster requests by default, while it is ready to send outgoing cross-cluster requests. Ensure you’ve enabled the remote cluster server on every node of the remote cluster. In `elasticsearch.yml`: -* Set [`remote_cluster_server.enabled`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#remote-cluster-network-settings) to `true`. +* Set [`remote_cluster_server.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#remote-cluster-network-settings) to `true`. * Configure the bind and publish address for remote cluster server traffic, for example using [`remote_cluster.host`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#remote-cluster-network-settings). Without configuring the address, remote cluster traffic may be bound to the local interface, and remote clusters running on other machines can’t connect. * Optionally, configure the remote server port using [`remote_cluster.port`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#remote_cluster.port) (defaults to `9443`). diff --git a/deploy-manage/security/different-ca.md b/deploy-manage/security/different-ca.md index f335bffe2b..b8015fa8f8 100644 --- a/deploy-manage/security/different-ca.md +++ b/deploy-manage/security/different-ca.md @@ -97,7 +97,7 @@ The following examples use PKCS#12 files, but the same steps apply to JKS keysto Now that your CA truststore is updated, use your new CA certificate to sign a certificate for your nodes. ::::{note} -If your organization has its own CA, you’ll need to [generate Certificate Signing Requests (CSRs)](https://www.elastic.co/guide/en/elasticsearch/reference/current/certutil.html#certutil-csr). CSRs contain information that your CA uses to generate and sign a security certificate. +If your organization has its own CA, you’ll need to [generate Certificate Signing Requests (CSRs)](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/certutil.md#certutil-csr). CSRs contain information that your CA uses to generate and sign a security certificate. 
:::: diff --git a/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md b/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md index 908fe4cfbd..67232ef966 100644 --- a/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md +++ b/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md @@ -9,7 +9,7 @@ The TLS and SSL protocols use a cipher suite that determines the strength of enc The *Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files* enable the use of additional cipher suites for Java in a separate JAR file that you need to add to your Java installation. You can download this JAR file from Oracle’s [download page](http://www.oracle.com/technetwork/java/javase/downloads/index.md). The *JCE Unlimited Strength Jurisdiction Policy Files* are required for encryption with key lengths greater than 128 bits, such as 256-bit AES encryption. -After installation, all cipher suites in the JCE are available for use but requires configuration in order to use them. To enable the use of stronger cipher suites with {{es}} {{security-features}}, configure the [`cipher_suites` parameter](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ssl-tls-settings). +After installation, all cipher suites in the JCE are available for use but require configuration before they can be used. To enable the use of stronger cipher suites with {{es}} {{security-features}}, configure the [`cipher_suites` parameter](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#ssl-tls-settings). ::::{note} The *JCE Unlimited Strength Jurisdiction Policy Files* must be installed on all nodes in the cluster to establish an improved level of encryption strength. diff --git a/deploy-manage/security/encrypt-deployment-with-customer-managed-encryption-key.md b/deploy-manage/security/encrypt-deployment-with-customer-managed-encryption-key.md index 3a254e28fe..abd0a63fe4 100644 --- a/deploy-manage/security/encrypt-deployment-with-customer-managed-encryption-key.md +++ b/deploy-manage/security/encrypt-deployment-with-customer-managed-encryption-key.md @@ -35,7 +35,7 @@ When a deployment encrypted with a customer-managed key is deleted or terminated ::::::{tab-item} AWS * Have permissions on AWS KMS to [create a symmetric AWS KMS key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.md#symmetric-cmks) and to configure AWS IAM roles. -* Consider the cloud regions where you need your deployment to live. Refer to the [list of available regions, deployment templates, and instance configurations](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html) supported by Elastic Cloud. +* Consider the cloud regions where you need your deployment to live. Refer to the [list of available regions, deployment templates, and instance configurations](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md) supported by Elastic Cloud. :::::: ::::::{tab-item} Azure @@ -170,7 +170,7 @@ Provide your key identifier without the key version identifier so Elastic Cloud * using the API: * Choose a **cloud region** and a **deployment template** (also called hardware profile) for your deployment from the [list of available regions, deployment templates, and instance configurations](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html).
- * [Get a valid Elastic Cloud API key](https://www.elastic.co/guide/en/cloud/current/ec-api-authentication.html) with the **Organization owner** role or the **Admin** role on deployments. These roles allow you to create new deployments. + * [Get a valid Elastic Cloud API key](/deploy-manage/api-keys/elastic-cloud-api-keys.md) with the **Organization owner** role or the **Admin** role on deployments. These roles allow you to create new deployments. * Get the ARN of the symmetric AWS KMS key or of its alias. Use an alias if you are planning to do manual key rotations as specified in the [AWS documentation](https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.md). * Use these parameters to create a new deployment with the [Elastic Cloud API](https://www.elastic.co/docs/api/doc/cloud/group/endpoint-deployments). For example: diff --git a/deploy-manage/security/httprest-clients-security.md b/deploy-manage/security/httprest-clients-security.md index 38892be85f..76011038fb 100644 --- a/deploy-manage/security/httprest-clients-security.md +++ b/deploy-manage/security/httprest-clients-security.md @@ -70,11 +70,11 @@ es-secondary-authorization: ApiKey <1> For more information about using {{security-features}} with the language specific clients, refer to: -* [Java](https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current/_basic_authentication.html) -* [JavaScript](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html) -* [.NET](https://www.elastic.co/guide/en/elasticsearch/client/net-api/current/configuration.html) +* [Java](asciidocalypse://docs/elasticsearch-java/docs/reference/elasticsearch/elasticsearch-client-java-api-client/_basic_authentication.md) +* [JavaScript](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/connecting.md) +* [.NET](asciidocalypse://docs/elasticsearch-net/docs/reference/elasticsearch/elasticsearch-client-net-api/configuration.md) * [Perl](https://metacpan.org/pod/Search::Elasticsearch::Cxn::HTTPTiny#CONFIGURATION) -* [PHP](https://www.elastic.co/guide/en/elasticsearch/client/php-api/current/connecting.html) +* [PHP](asciidocalypse://docs/elasticsearch-php/docs/reference/elasticsearch/elasticsearch-client-php-api/connecting.md) * [Python](https://elasticsearch-py.readthedocs.io/en/master/#ssl-and-authentication) * [Ruby](https://github.com/elasticsearch/elasticsearch-ruby/tree/master/elasticsearch-transport#authentication) diff --git a/deploy-manage/security/kibana-session-management.md b/deploy-manage/security/kibana-session-management.md index 14e45383d4..7e63a2cb5e 100644 --- a/deploy-manage/security/kibana-session-management.md +++ b/deploy-manage/security/kibana-session-management.md @@ -9,7 +9,7 @@ When you log in, {{kib}} creates a session that is used to authenticate subseque When your session expires, or you log out, {{kib}} will invalidate your cookie and remove session information from the index. {{kib}} also periodically invalidates and removes any expired sessions that weren’t explicitly invalidated. -To manage user sessions programmatically, {{kib}} exposes [session management APIs](https://www.elastic.co/guide/en/kibana/current/session-management-api.html). For details, check out [Session and cookie security settings](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#security-session-and-cookie-settings). 
+To manage user sessions programmatically, {{kib}} exposes [session management APIs](https://www.elastic.co/guide/en/kibana/current/session-management-api.html). For details, check out [Session and cookie security settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md#security-session-and-cookie-settings). ## Session idle timeout [session-idle-timeout] diff --git a/deploy-manage/security/same-ca.md b/deploy-manage/security/same-ca.md index a70a096544..e8ae052dac 100644 --- a/deploy-manage/security/same-ca.md +++ b/deploy-manage/security/same-ca.md @@ -115,7 +115,7 @@ Well done! You’ve updated the keystore for the transport layer. You can also [ Other components such as {{kib}} or any of the Elastic language clients verify this certificate when they connect to {{es}}. ::::{note} -If your organization has its own CA, you’ll need to [generate Certificate Signing Requests (CSRs)](https://www.elastic.co/guide/en/elasticsearch/reference/current/certutil.html#certutil-csr). CSRs contain information that your CA uses to generate and sign a certificate. +If your organization has its own CA, you’ll need to [generate Certificate Signing Requests (CSRs)](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/certutil.md#certutil-csr). CSRs contain information that your CA uses to generate and sign a certificate. :::: diff --git a/deploy-manage/security/secure-clients-integrations.md b/deploy-manage/security/secure-clients-integrations.md index 2f7212cb1c..a7cb47df26 100644 --- a/deploy-manage/security/secure-clients-integrations.md +++ b/deploy-manage/security/secure-clients-integrations.md @@ -9,18 +9,18 @@ You will need to update the configuration for several [clients](httprest-clients The {{es}} {{security-features}} enable you to secure your {{es}} cluster. But {{es}} itself is only one product within the {{stack}}. 
It is often the case that other products in the {{stack}} are connected to the cluster and therefore need to be secured as well, or at least communicate with the cluster in a secured way: -* [Apache Hadoop](https://www.elastic.co/guide/en/elasticsearch/reference/current/hadoop.html) -* [Auditbeat](https://www.elastic.co/guide/en/beats/auditbeat/current/securing-auditbeat.html) -* [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/securing-filebeat.html) -* [{{fleet}} & {{agent}}](https://www.elastic.co/guide/en/fleet/current/secure.html) -* [Heartbeat](https://www.elastic.co/guide/en/beats/heartbeat/current/securing-heartbeat.html) +* [Apache Hadoop](asciidocalypse://docs/elasticsearch-hadoop/docs/reference/ingestion-tools/elasticsearch-hadoop/security.md) +* [Auditbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-auditbeat/securing-auditbeat.md) +* [Filebeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/securing-filebeat.md) +* [{{fleet}} & {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/secure.md) +* [Heartbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-heartbeat/securing-heartbeat.md) * [{{kib}}](../security.md) -* [Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) -* [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/securing-metricbeat.html) +* [Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/secure-connection.md) +* [Metricbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/securing-metricbeat.md) * [Monitoring and security](../monitor.md) -* [Packetbeat](https://www.elastic.co/guide/en/beats/packetbeat/current/securing-packetbeat.html) +* [Packetbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-packetbeat/securing-packetbeat.md) * [Reporting](../../explore-analyze/report-and-share.md) -* [Winlogbeat](https://www.elastic.co/guide/en/beats/winlogbeat/current/securing-winlogbeat.html) +* [Winlogbeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-winlogbeat/securing-winlogbeat.md) diff --git a/deploy-manage/security/secure-endpoints.md b/deploy-manage/security/secure-endpoints.md index aac93e5bd3..9db6054c0c 100644 --- a/deploy-manage/security/secure-endpoints.md +++ b/deploy-manage/security/secure-endpoints.md @@ -22,7 +22,7 @@ Never try to run {{es}} as the `root` user, which would invalidate any defense s Even with security enabled, never expose {{es}} to public internet traffic. Using an application to sanitize requests to {{es}} still poses risks, such as a malicious user writing [`_search`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-search) requests that could overwhelm an {{es}} cluster and bring it down. Keep {{es}} as isolated as possible, preferably behind a firewall and a VPN. Any internet-facing applications should run pre-canned aggregations, or not run aggregations at all. -While you absolutely shouldn’t expose {{es}} directly to the internet, you also shouldn’t expose {{es}} directly to users. Instead, use an intermediary application to make requests on behalf of users. This implementation allows you to track user behaviors, such as can submit requests, and to which specific nodes in the cluster. 
For example, you can implement an application that accepts a search term from a user and funnels it through a [`simple_query_string`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html) query. +While you absolutely shouldn’t expose {{es}} directly to the internet, you also shouldn’t expose {{es}} directly to users. Instead, use an intermediary application to make requests on behalf of users. This implementation allows you to track user behaviors, such as can submit requests, and to which specific nodes in the cluster. For example, you can implement an application that accepts a search term from a user and funnels it through a [`simple_query_string`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-simple-query-string-query.md) query. ## Implement role based access control [security-create-appropriate-users] diff --git a/deploy-manage/security/secure-saved-objects.md b/deploy-manage/security/secure-saved-objects.md index 9753ef1be4..7cf075884d 100644 --- a/deploy-manage/security/secure-saved-objects.md +++ b/deploy-manage/security/secure-saved-objects.md @@ -23,7 +23,7 @@ If you don’t specify an encryption key, {{kib}} might disable features that re ::::{tip} -For help generating the encryption key, refer to the [`kibana-encryption-keys`](https://www.elastic.co/guide/en/kibana/current/kibana-encryption-keys.html) script. +For help generating the encryption key, refer to the [`kibana-encryption-keys`](asciidocalypse://docs/kibana/docs/reference/commands/kibana-encryption-keys.md) script. :::: diff --git a/deploy-manage/security/security-certificates-keys.md b/deploy-manage/security/security-certificates-keys.md index 9af860c79d..feefc36726 100644 --- a/deploy-manage/security/security-certificates-keys.md +++ b/deploy-manage/security/security-certificates-keys.md @@ -39,7 +39,7 @@ There are [some cases](../deploy/self-managed/installing-elasticsearch.md#stack- 2. Copy the generated `elastic` password and enrollment token. These credentials are only shown when you start {{es}} for the first time. ::::{note} - If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](https://www.elastic.co/guide/en/elasticsearch/reference/current/create-enrollment-token.html) tool. These tools are available in the {{es}} `bin` directory. + If you need to reset the password for the `elastic` user or other built-in users, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) tool. To generate new enrollment tokens for {{kib}} or {{es}} nodes, run the [`elasticsearch-create-enrollment-token`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/create-enrollment-token.md) tool. These tools are available in the {{es}} `bin` directory. :::: @@ -90,7 +90,7 @@ When {{es}} starts for the first time, the security auto-configuration process b Before enrolling a new node, additional actions such as binding to an address other than `localhost` or satisfying bootstrap checks are typically necessary in production clusters. During that time, an auto-generated enrollment token could expire, which is why enrollment tokens aren’t generated automatically. 
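A minimal sketch of the two command-line tools referenced in this file, assuming they are run from the {{es}} installation directory:

```shell
# Reset the password of the elastic built-in user to an auto-generated value.
bin/elasticsearch-reset-password -u elastic

# Create an enrollment token for a new node (use "-s kibana" for {{kib}}).
bin/elasticsearch-create-enrollment-token -s node
```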
-Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) for more information.
+Additionally, only nodes on the same host can join the cluster without additional configuration. If you want nodes from another host to join your cluster, you need to set `transport.host` to a [supported value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#network-interface-values) (such as uncommenting the suggested value of `0.0.0.0`), or an IP address that’s bound to an interface where other hosts can reach it. Refer to [transport settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-settings) for more information.
 To enroll new nodes in your cluster, create an enrollment token with the `elasticsearch-create-enrollment-token` tool on any existing node in your cluster. You can then start a new node with the `--enrollment-token` parameter so that it joins an existing cluster.
@@ -177,7 +177,7 @@ When you install {{es}}, the following certificates and keys are generated in th
 `transport.p12`
 : Keystore that contains the key and certificate for the transport layer for all the nodes in your cluster.
-`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-keystore.html) tool.
+`http.p12` and `transport.p12` are password-protected PKCS#12 keystores. {{es}} stores the passwords for these keystores as [secure settings](secure-settings.md). To retrieve the passwords so that you can inspect or change the keystore contents, use the [`bin/elasticsearch-keystore`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/elasticsearch-keystore.md) tool.
 Use the following command to retrieve the password for `http.p12`:
@@ -228,11 +228,11 @@ The {{es}} configuration directory isn’t writable
 The following settings are incompatible with security auto configuration. If any of these settings exist, the node startup process skips configuring security automatically and the node starts normally.
-* [`node.roles`](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) is set to a value where the node can’t be elected as `master`, or if the node can’t hold data
-* [`xpack.security.autoconfiguration.enabled`](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#general-security-settings) is set to `false`
+* [`node.roles`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) is set to a value where the node can’t be elected as `master`, or where the node can’t hold data
+* [`xpack.security.autoconfiguration.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#general-security-settings) is set to `false`
 * [`xpack.security.enabled`](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#general-security-settings) has a value set
 * Any of the [`xpack.security.transport.ssl.*`](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#transport-tls-ssl-settings) or [`xpack.security.http.ssl.*`](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#http-tls-ssl-settings) settings have a value set in the `elasticsearch.yml` configuration file or in the `elasticsearch.keystore`
-* Any of the `discovery.type`, `discovery.seed_hosts`, or `cluster.initial_master_nodes` [discovery and cluster formation settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-settings.html) have a value set
+* Any of the `discovery.type`, `discovery.seed_hosts`, or `cluster.initial_master_nodes` [discovery and cluster formation settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/discovery-cluster-formation-settings.md) have a value set
 ::::{note}
 Exceptions are when `discovery.type` is set to `single-node`, or when `cluster.initial_master_nodes` exists but contains only the name of the current node.
diff --git a/deploy-manage/security/set-up-basic-security-plus-https.md b/deploy-manage/security/set-up-basic-security-plus-https.md
index cf376793da..111e17a050 100644
--- a/deploy-manage/security/set-up-basic-security-plus-https.md
+++ b/deploy-manage/security/set-up-basic-security-plus-https.md
@@ -201,11 +201,11 @@ After making these changes, you must always access {{kib}} via HTTPS. For exampl
 ## Configure {{beats}} security [configure-beats-security]
-{{beats}} are open source data shippers that you install as agents on your servers to send operational data to {{es}}. Each Beat is a separately installable product. The following steps cover configuring security for {{metricbeat}}. Follow these steps for each [additional Beat](https://www.elastic.co/guide/en/beats/libbeat/current/getting-started.html) you want to configure security for.
+{{beats}} are open source data shippers that you install as agents on your servers to send operational data to {{es}}. Each Beat is a separately installable product. The following steps cover configuring security for {{metricbeat}}. Follow these steps for each [additional Beat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/index.md) you want to configure security for.
 ### Prerequisites [_prerequisites_13]
-[Install {{metricbeat}}](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-installation-configuration.html) using your preferred method.
+[Install {{metricbeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-installation-configuration.md) using your preferred method.
 ::::{important}
 You cannot connect to the {{stack}} or configure assets for {{metricbeat}} before completing the following steps.
@@ -269,7 +269,7 @@ To send monitoring data securely, create a monitoring user and grant it the nece
 You can use the built-in `beats_system` user, if it’s available in your environment. Because the built-in users are not available in {{ecloud}}, these instructions create a user that is explicitly used for monitoring {{metricbeat}}.
-1. If you’re using the built-in `beats_system` user, on any node in your cluster, run the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) utility to set the password for that user:
+1. If you’re using the built-in `beats_system` user, on any node in your cluster, run the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) utility to set the password for that user:
 This command resets the password for the `beats_system` user to an auto-generated value.
@@ -441,7 +441,7 @@ In production environments, we strongly recommend using a separate cluster (refe
 verification_mode: "certificate"
 ```
- 1. Configuring SSL is required when monitoring a node with encrypted traffic. See [Configure SSL for {{metricbeat}}](https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-ssl.html).`hosts`
+ 1. Configuring SSL is required when monitoring a node with encrypted traffic. See [Configure SSL for {{metricbeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configuration-ssl.md).`hosts`
 : Specifies the host where your {{es}} cluster is running. Ensure that you include `https` in the URL.
 `username`
diff --git a/deploy-manage/security/set-up-basic-security.md b/deploy-manage/security/set-up-basic-security.md
index 458d827c44..e60f3aa302 100644
--- a/deploy-manage/security/set-up-basic-security.md
+++ b/deploy-manage/security/set-up-basic-security.md
@@ -81,7 +81,7 @@ Complete the following steps **for each node in your cluster**. To join the same
 1. Open the `$ES_PATH_CONF/elasticsearch.yml` file and make the following changes:
- 1. Add the [`cluster-name`](https://www.elastic.co/guide/en/elasticsearch/reference/current/misc-cluster-settings.html#cluster-name) setting and enter a name for your cluster:
+ 1. Add the [`cluster.name`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-name) setting and enter a name for your cluster:
 ```yaml
 cluster.name: my-cluster
@@ -105,7 +105,7 @@ Complete the following steps **for each node in your cluster**. To join the same
 xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
 ```
- 1. If you want to use hostname verification, set the verification mode to `full`. You should generate a different certificate for each host that matches the DNS or IP address. See the `xpack.security.transport.ssl.verification_mode` parameter in [TLS settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#transport-tls-ssl-settings).
+ 1. If you want to use hostname verification, set the verification mode to `full`. You should generate a different certificate for each host that matches the DNS or IP address. See the `xpack.security.transport.ssl.verification_mode` parameter in [TLS settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#transport-tls-ssl-settings).
 2. If you entered a password when creating the node certificate, run the following commands to store the password in the {{es}} keystore:
diff --git a/deploy-manage/security/set-up-minimal-security.md b/deploy-manage/security/set-up-minimal-security.md
index 4a6f6e4db9..60c8c9461e 100644
--- a/deploy-manage/security/set-up-minimal-security.md
+++ b/deploy-manage/security/set-up-minimal-security.md
@@ -61,7 +61,7 @@ You only need to set passwords for the `elastic` and `kibana_system` users when
 ./bin/elasticsearch
 ```
-2. On any node in your cluster, open another terminal window and set the password for the `elastic` built-in user by running the [`elasticsearch-reset-password`](https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-password.html) utility. This command resets the password to an auto-generated value.
+2. On any node in your cluster, open another terminal window and set the password for the `elastic` built-in user by running the [`elasticsearch-reset-password`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/reset-password.md) utility. This command resets the password to an auto-generated value.
 ```shell
 ./bin/elasticsearch-reset-password -u elastic
diff --git a/deploy-manage/security/supported-ssltls-versions-by-jdk-version.md b/deploy-manage/security/supported-ssltls-versions-by-jdk-version.md
index 726580b650..fbf7d469ec 100644
--- a/deploy-manage/security/supported-ssltls-versions-by-jdk-version.md
+++ b/deploy-manage/security/supported-ssltls-versions-by-jdk-version.md
@@ -93,7 +93,7 @@ jdk.tls.disabledAlgorithms=SSLv3, TLSv1, RC4, DES, MD5withRSA, \
 ### Enable your custom security configuration [_enable_your_custom_security_configuration]
-To enable your custom security policy, add a file in the [`jvm.options.d`](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-options) directory within your {{es}} configuration directory.
+To enable your custom security policy, add a file in the [`jvm.options.d`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/jvm-settings.md#set-jvm-options) directory within your {{es}} configuration directory.
 To enable your custom security policy, create a file named `java.security.options` within the [jvm.options.d](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-options) directory of your {{es}} configuration directory, with this content:
@@ -105,7 +105,7 @@
 ## Enabling TLS versions in {{es}} [_enabling_tls_versions_in_es]
-SSL/TLS versions can be enabled and disabled within {{es}} via the [`ssl.supported_protocols` settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ssl-tls-settings).
+SSL/TLS versions can be enabled and disabled within {{es}} via the [`ssl.supported_protocols` settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#ssl-tls-settings).
 {{es}} will only support the TLS versions that are enabled by the [underlying JDK](). If you configure `ssl.supported_procotols` to include a TLS version that is not enabled in your JDK, then it will be silently ignored.
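To make the `ssl.supported_protocols` discussion above concrete, a hedged `elasticsearch.yml` sketch (the protocol list is an example, not a recommendation):

```yaml
# Restrict the transport and HTTP layers to TLS 1.2 and 1.3 only.
xpack.security.transport.ssl.supported_protocols: ["TLSv1.3", "TLSv1.2"]
xpack.security.http.ssl.supported_protocols: ["TLSv1.3", "TLSv1.2"]
```

Any protocol listed here that the JDK has disabled is silently ignored, as noted above.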
diff --git a/deploy-manage/tools/cross-cluster-replication.md b/deploy-manage/tools/cross-cluster-replication.md
index 05c6f03562..1f78d6c1cb 100644
--- a/deploy-manage/tools/cross-cluster-replication.md
+++ b/deploy-manage/tools/cross-cluster-replication.md
@@ -178,7 +178,7 @@ When you create a follower index, you cannot use it until it is fully initialize
 Remote recovery is a network intensive process that transfers all of the Lucene segment files from the leader cluster to the follower cluster. The follower requests that a recovery session be initiated on the primary shard in the leader cluster. The follower then requests file chunks concurrently from the leader. By default, the process concurrently requests five 1MB file chunks. This default behavior is designed to support leader and follower clusters with high network latency between them.
 ::::{tip}
-You can modify dynamic [remote recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-settings.html#ccr-recovery-settings) to rate-limit the transmitted data and manage the resources consumed by remote recoveries.
+You can modify dynamic [remote recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cross-cluster-replication-settings.md#ccr-recovery-settings) to rate-limit the transmitted data and manage the resources consumed by remote recoveries.
 ::::
@@ -187,11 +187,11 @@ Use the [recovery API](https://www.elastic.co/docs/api/doc/elasticsearch/operati
 ## Replicating a leader requires soft deletes [ccr-leader-requirements]
-{{ccr-cap}} works by replaying the history of individual write operations that were performed on the shards of the leader index. {{es}} needs to retain the [history of these operations](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-history-retention.html) on the leader shards so that they can be pulled by the follower shard tasks. The underlying mechanism used to retain these operations is *soft deletes*.
+{{ccr-cap}} works by replaying the history of individual write operations that were performed on the shards of the leader index. {{es}} needs to retain the [history of these operations](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/history-retention-settings.md) on the leader shards so that they can be pulled by the follower shard tasks. The underlying mechanism used to retain these operations is *soft deletes*.
 A soft delete occurs whenever an existing document is deleted or updated. By retaining these soft deletes up to configurable limits, the history of operations can be retained on the leader shards and made available to the follower shard tasks as it replays the history of operations.
-The [`index.soft_deletes.retention_lease.period`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#ccr-index-soft-deletes-retention-period) setting defines the maximum time to retain a shard history retention lease before it is considered expired. This setting determines how long the cluster containing your follower index can be offline, which is 12 hours by default. If a shard copy recovers after its retention lease expires, but the missing operations are still available on the leader index, then {{es}} will establish a new lease and copy the missing operations. However {{es}} does not guarantee to retain unleased operations, so it is also possible that some of the missing operations have been discarded by the leader and are now completely unavailable. If this happens then the follower cannot recover automatically so you must [recreate it](cross-cluster-replication/ccr-recreate-follower-index.md).
+The [`index.soft_deletes.retention_lease.period`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#ccr-index-soft-deletes-retention-period) setting defines the maximum time to retain a shard history retention lease before it is considered expired. This setting determines how long the cluster containing your follower index can be offline, which is 12 hours by default. If a shard copy recovers after its retention lease expires, but the missing operations are still available on the leader index, then {{es}} will establish a new lease and copy the missing operations. However, {{es}} does not guarantee to retain unleased operations, so it is also possible that some of the missing operations have been discarded by the leader and are now completely unavailable. If this happens, the follower cannot recover automatically, so you must [recreate it](cross-cluster-replication/ccr-recreate-follower-index.md).
 Soft deletes must be enabled for indices that you want to use as leader indices. Soft deletes are enabled by default on new indices created on or after {{es}} 7.0.0.
@@ -215,13 +215,13 @@ This following sections provide more information about how to configure and use
 {{ccr-cap}} is designed to replicate user-generated indices only, and doesn’t currently replicate any of the following:
-* [System indices](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#system-indices)
+* [System indices](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#system-indices)
 * [Machine learning jobs](../../explore-analyze/machine-learning.md)
 * [index templates](../../manage-data/data-store/templates.md)
 * [{{ilm-cap}}](../../manage-data/lifecycle/index-lifecycle-management.md) and [{{slm}}](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-slm) polices
 * [User permissions and role mappings](../users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md)
 * [Snapshot repository settings](snapshot-and-restore/self-managed.md)
-* [Cluster settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html)
+* [Cluster settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md)
 * [Searchable snapshot](snapshot-and-restore/searchable-snapshots.md)
 If you want to replicate any of this data, you must replicate it to a remote cluster manually.
diff --git a/deploy-manage/tools/cross-cluster-replication/_connect_to_a_remote_cluster.md b/deploy-manage/tools/cross-cluster-replication/_connect_to_a_remote_cluster.md
index 998b59198e..f884218abe 100644
--- a/deploy-manage/tools/cross-cluster-replication/_connect_to_a_remote_cluster.md
+++ b/deploy-manage/tools/cross-cluster-replication/_connect_to_a_remote_cluster.md
@@ -13,7 +13,7 @@ To replicate an index on a remote cluster (Cluster A) to a local cluster (Cluste
 To configure a remote cluster from Stack Management in {{kib}}:
-1. Set up a [secure connection](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#add-remote-clusters) as needed.
+1. Set up a [secure connection](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#add-remote-clusters) as needed.
 2. Select **Remote Clusters** from the side navigation.
 3. Specify the {{es}} endpoint URL, or the IP address or host name of the remote cluster (`ClusterA`) followed by the transport port (defaults to `9300`). For example, `cluster.es.eastus2.staging.azure.foundit.no:9400` or `192.168.1.1:9300`.
diff --git a/deploy-manage/tools/cross-cluster-replication/_failback_when_clustera_comes_back.md b/deploy-manage/tools/cross-cluster-replication/_failback_when_clustera_comes_back.md
index 86a31375b4..7e85485cb2 100644
--- a/deploy-manage/tools/cross-cluster-replication/_failback_when_clustera_comes_back.md
+++ b/deploy-manage/tools/cross-cluster-replication/_failback_when_clustera_comes_back.md
@@ -55,7 +55,7 @@ When `clusterA` comes back, `clusterB` becomes the new leader and `clusterA` bec
 ```
 ::::{tip}
- If a soft delete is merged away before it can be replicated to a follower the following process will fail due to incomplete history on the leader, see [index.soft_deletes.retention_lease.period](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#ccr-index-soft-deletes-retention-period) for more details.
+ If a soft delete is merged away before it can be replicated to a follower, the following process will fail due to incomplete history on the leader. See [index.soft_deletes.retention_lease.period](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#ccr-index-soft-deletes-retention-period) for more details.
 ::::
diff --git a/deploy-manage/tools/cross-cluster-replication/_perform_update_or_delete_by_query.md b/deploy-manage/tools/cross-cluster-replication/_perform_update_or_delete_by_query.md
index e2e7fabcb9..9f9204ad26 100644
--- a/deploy-manage/tools/cross-cluster-replication/_perform_update_or_delete_by_query.md
+++ b/deploy-manage/tools/cross-cluster-replication/_perform_update_or_delete_by_query.md
@@ -47,7 +47,7 @@ It is possible to update or delete the documents but you can only perform these
 ```
 ::::{tip}
- If a soft delete is merged away before it can be replicated to a follower the following process will fail due to incomplete history on the leader, see [index.soft_deletes.retention_lease.period](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#ccr-index-soft-deletes-retention-period) for more details.
+ If a soft delete is merged away before it can be replicated to a follower, the following process will fail due to incomplete history on the leader. See [index.soft_deletes.retention_lease.period](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#ccr-index-soft-deletes-retention-period) for more details.
 ::::
diff --git a/deploy-manage/tools/cross-cluster-replication/bi-directional-disaster-recovery.md b/deploy-manage/tools/cross-cluster-replication/bi-directional-disaster-recovery.md
index 5e2f1f8fe6..aaabf9b64e 100644
--- a/deploy-manage/tools/cross-cluster-replication/bi-directional-disaster-recovery.md
+++ b/deploy-manage/tools/cross-cluster-replication/bi-directional-disaster-recovery.md
@@ -11,7 +11,7 @@ mapped_pages:
 Learn how to set up disaster recovery between two clusters based on bi-directional {{ccr}}. The following tutorial is designed for data streams which support [update by query](../../../manage-data/data-store/data-streams/use-data-stream.md#update-docs-in-a-data-stream-by-query) and [delete by query](../../../manage-data/data-store/data-streams/use-data-stream.md#delete-docs-in-a-data-stream-by-query). You can only perform these actions on the leader index.
-This tutorial works with {{ls}} as the source of ingestion. It takes advantage of a {{ls}} feature where [the {{ls}} output to {{es}}](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) can be load balanced across an array of hosts specified. {{beats}} and {{agents}} currently do not support multiple outputs. It should also be possible to set up a proxy (load balancer) to redirect traffic without {{ls}} in this tutorial.
+This tutorial works with {{ls}} as the source of ingestion. It takes advantage of a {{ls}} feature where [the {{ls}} output to {{es}}](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) can be load balanced across a specified array of hosts. {{beats}} and {{agents}} currently do not support multiple outputs. It should also be possible to set up a proxy (load balancer) to redirect traffic without {{ls}} in this tutorial.
 * Setting up a remote cluster on `clusterA` and `clusterB`.
 * Setting up bi-directional cross-cluster replication with exclusion patterns.
diff --git a/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-prerequisites.md b/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-prerequisites.md
index c7c4b3731c..74b4e41d0c 100644
--- a/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-prerequisites.md
+++ b/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-prerequisites.md
@@ -10,5 +10,5 @@ To complete this tutorial, you need:
 * The `manage` cluster privilege on the local cluster.
 * A license on both clusters that includes {{ccr}}. [Activate a free 30-day trial](../../license/manage-your-license-in-self-managed-cluster.md).
 * An index on the remote cluster that contains the data you want to replicate. This tutorial uses the sample eCommerce orders data set. [Load sample data](../../../explore-analyze/index.md#gs-get-data-into-kibana).
-* In the local cluster, all nodes with the `master` [node role](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) must also have the [`remote_cluster_client`](../../distributed-architecture/clusters-nodes-shards/node-roles.md#remote-node) role. The local cluster must also have at least one node with both a data role and the [`remote_cluster_client`](../../distributed-architecture/clusters-nodes-shards/node-roles.md#remote-node) role. Individual tasks for coordinating replication scale based on the number of data nodes with the `remote_cluster_client` role in the local cluster.
+* In the local cluster, all nodes with the `master` [node role](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) must also have the [`remote_cluster_client`](../../distributed-architecture/clusters-nodes-shards/node-roles.md#remote-node) role. The local cluster must also have at least one node with both a data role and the [`remote_cluster_client`](../../distributed-architecture/clusters-nodes-shards/node-roles.md#remote-node) role. Individual tasks for coordinating replication scale based on the number of data nodes with the `remote_cluster_client` role in the local cluster.
diff --git a/deploy-manage/tools/cross-cluster-replication/ccr-recreate-follower-index.md b/deploy-manage/tools/cross-cluster-replication/ccr-recreate-follower-index.md
index 1faca2059a..601d4fa062 100644
--- a/deploy-manage/tools/cross-cluster-replication/ccr-recreate-follower-index.md
+++ b/deploy-manage/tools/cross-cluster-replication/ccr-recreate-follower-index.md
@@ -5,7 +5,7 @@ mapped_pages:
 # Recreate a follower index [ccr-recreate-follower-index]
-When a document is updated or deleted, the underlying operation is retained in the Lucene index for a period of time defined by the [`index.soft_deletes.retention_lease.period`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#ccr-index-soft-deletes-retention-period) parameter. You configure this setting on the [leader index](../cross-cluster-replication.md#ccr-leader-requirements).
+When a document is updated or deleted, the underlying operation is retained in the Lucene index for a period of time defined by the [`index.soft_deletes.retention_lease.period`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#ccr-index-soft-deletes-retention-period) parameter. You configure this setting on the [leader index](../cross-cluster-replication.md#ccr-leader-requirements).
 When a follower index starts, it acquires a retention lease from the leader index. This lease informs the leader that it should not allow a soft delete to be pruned until either the follower indicates that it has received the operation, or until the lease expires.
diff --git a/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns.md b/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns.md
index f30bc5e7bb..525fdd0bd5 100644
--- a/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns.md
+++ b/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns.md
@@ -8,7 +8,7 @@ mapped_pages:
 To replicate time series indices, you configure an auto-follow pattern so that each new index in the series is replicated automatically. Whenever the name of a new index on the remote cluster matches the auto-follow pattern, a corresponding follower index is added to the local cluster.
 ::::{note}
-Auto-follow patterns only match open indices on the remote cluster that have all primary shards started. Auto-follow patterns do not match indices that can’t be used for {{ccr-init}} such as [closed indices](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open) or [{{search-snaps}}](../snapshot-and-restore/searchable-snapshots.md). Avoid using an auto-follow pattern that matches indices with a [read or write block](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html#index-block-settings). These blocks prevent follower indices from replicating such indices.
+Auto-follow patterns only match open indices on the remote cluster that have all primary shards started. Auto-follow patterns do not match indices that can’t be used for {{ccr-init}} such as [closed indices](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open) or [{{search-snaps}}](../snapshot-and-restore/searchable-snapshots.md). Avoid using an auto-follow pattern that matches indices with a [read or write block](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-block-settings.md#index-block-settings). These blocks prevent follower indices from replicating such indices.
 ::::
diff --git a/deploy-manage/tools/cross-cluster-replication/set-up-cross-cluster-replication.md b/deploy-manage/tools/cross-cluster-replication/set-up-cross-cluster-replication.md
index 56a271c443..53d3dcec4d 100644
--- a/deploy-manage/tools/cross-cluster-replication/set-up-cross-cluster-replication.md
+++ b/deploy-manage/tools/cross-cluster-replication/set-up-cross-cluster-replication.md
@@ -23,7 +23,7 @@ In this guide, you’ll learn how to:
 You can manually create follower indices to replicate specific indices on a remote cluster, or configure auto-follow patterns to replicate rolling time series indices.
 ::::{tip}
-If you want to replicate data across clusters in the cloud, you can [configure remote clusters on {{ess}}](https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html). Then, you can [search across clusters](../../../solutions/search/cross-cluster-search.md) and set up {{ccr}}.
+If you want to replicate data across clusters in the cloud, you can [configure remote clusters on {{ess}}](/deploy-manage/remote-clusters/ec-enable-ccs.md). Then, you can [search across clusters](../../../solutions/search/cross-cluster-search.md) and set up {{ccr}}.
 ::::
diff --git a/deploy-manage/tools/snapshot-and-restore.md b/deploy-manage/tools/snapshot-and-restore.md
index e013098f9d..6e83a3ce5f 100644
--- a/deploy-manage/tools/snapshot-and-restore.md
+++ b/deploy-manage/tools/snapshot-and-restore.md
@@ -136,7 +136,7 @@ A **feature state** contains the indices and data streams used to store configur
 To retrieve a list of feature states, use the [Features API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features).
 ::::
-A feature state typically includes one or more [system indices or system data streams](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#system-indices). It may also include regular indices and data streams used by the feature. For example, a feature state may include a regular index that contains the feature’s execution history. Storing this history in a regular index lets you more easily search it.
+A feature state typically includes one or more [system indices or system data streams](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#system-indices). It may also include regular indices and data streams used by the feature. For example, a feature state may include a regular index that contains the feature’s execution history. Storing this history in a regular index lets you more easily search it.
 In Elasticsearch 8.0 and later versions, feature states are the only way to back up and restore system indices and system data streams.
@@ -183,7 +183,7 @@ You can’t restore an index to an earlier version of Elasticsearch. For example
 A compatible snapshot can contain indices created in an older incompatible version. For example, a snapshot of a 7.17 cluster can contain an index created in 6.8. Restoring the 6.8 index to an 8.17 cluster fails unless you can use the [archive functionality](/deploy-manage/upgrade/deployment-or-cluster/reading-indices-from-older-elasticsearch-versions.md). Keep this in mind if you take a snapshot before upgrading a cluster.
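The workaround in the next change relies on reindex-from-remote. As a sketch, with an illustrative intermediate host and index name (the remote host must also be allowed through the `reindex.remote.whitelist` setting in `elasticsearch.yml`):

```json
POST _reindex
{
  "source": {
    "remote": { "host": "https://intermediate-cluster:9200" },
    "index": "my-index-000001"
  },
  "dest": { "index": "my-index-000001" }
}
```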
-As a workaround, you can first restore the index to another cluster running the latest version of Elasticsearch that’s compatible with both the index and your current cluster. You can then use [reindex-from-remote](https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-reindex.html#reindex-from-remote) to rebuild the index on your current cluster. Reindex from remote is only possible if the index’s [`_source`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html) is enabled.
+As a workaround, you can first restore the index to another cluster running the latest version of Elasticsearch that’s compatible with both the index and your current cluster. You can then use [reindex-from-remote](https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-reindex.html#reindex-from-remote) to rebuild the index on your current cluster. Reindex from remote is only possible if the index’s [`_source`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md) is enabled.
 Reindexing from remote can take significantly longer than restoring a snapshot. Before you start, test the reindex from remote process with a subset of the data to estimate your time requirements.
diff --git a/deploy-manage/tools/snapshot-and-restore/azure-repository.md b/deploy-manage/tools/snapshot-and-restore/azure-repository.md
index 7b78c514b4..1313f9b162 100644
--- a/deploy-manage/tools/snapshot-and-restore/azure-repository.md
+++ b/deploy-manage/tools/snapshot-and-restore/azure-repository.md
@@ -100,7 +100,7 @@ The following list describes the available client settings. Those that must be s
 : A shared access signatures (SAS) token, which the repository’s internal Azure client uses for authentication. The SAS token must have read (r), write (w), list (l), and delete (d) permissions for the repository base path and all its contents. These permissions must be granted for the blob service (b) and apply to resource types service (s), container (c), and object (o). Alternatively, use `key`.
 `azure.client.CLIENT_NAME.timeout`
-: The client side timeout for any single request to Azure, as a [time unit](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units). For example, a value of `5s` specifies a 5 second timeout. There is no default value, which means that {{es}} uses the [default value](https://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.md#setTimeoutIntervalInMs(java.lang.Integer)) set by the Azure client.
+: The client-side timeout for any single request to Azure, as a [time unit](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units). For example, a value of `5s` specifies a 5-second timeout. There is no default value, which means that {{es}} uses the [default value](https://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)) set by the Azure client.
 `azure.client.CLIENT_NAME.endpoint`
 : The Azure endpoint to connect to. It must include the protocol used to connect to Azure.
@@ -162,7 +162,7 @@ PUT _snapshot/my_backup
 : When set to `true` metadata files are stored in compressed format. This setting doesn’t affect index files that are already compressed by default. Defaults to `true`.
 `max_restore_bytes_per_sec`
-: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html).
+: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md).
 `max_snapshot_bytes_per_sec`
 : (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot creation rate per node. Defaults to `40mb` per second. Note that if the [recovery settings for managed services](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings-for-managed-services) are set, then it defaults to unlimited, and the rate is additionally throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html).
diff --git a/deploy-manage/tools/snapshot-and-restore/azure-storage-repository.md b/deploy-manage/tools/snapshot-and-restore/azure-storage-repository.md
index 8154f2566d..8d9780f19c 100644
--- a/deploy-manage/tools/snapshot-and-restore/azure-storage-repository.md
+++ b/deploy-manage/tools/snapshot-and-restore/azure-storage-repository.md
@@ -15,7 +15,7 @@ Add your Azure Storage Container as a repository to the platform:
 1. [Log into the Cloud UI](../../deploy/cloud-enterprise/log-into-cloud-ui.md).
 2. Go to **Platform > Repositories** and add the following snapshot repository configuration under the advanced mode:
- If needed, set additional options for configuring chunk_size, compressions, and retries. Check the [supported settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-azure.html#repository-azure-repository-settings).
+ If needed, set additional options for configuring `chunk_size`, compression, and retries. Check the [supported settings](/deploy-manage/tools/snapshot-and-restore/azure-repository.md#repository-azure-repository-settings).
 ```json
 {
diff --git a/deploy-manage/tools/snapshot-and-restore/cloud-enterprise.md b/deploy-manage/tools/snapshot-and-restore/cloud-enterprise.md
index ddc461598f..d3363f97f0 100644
--- a/deploy-manage/tools/snapshot-and-restore/cloud-enterprise.md
+++ b/deploy-manage/tools/snapshot-and-restore/cloud-enterprise.md
@@ -11,11 +11,11 @@ When a repository is specified, a snapshot is taken every 30 minutes by default.
 Snapshots are configured and restored using the [snapshot and restore feature](../snapshot-and-restore.md).
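For the repository throttling settings described above, a hedged sketch of how they might appear when registering an Azure repository (the repository name, container, and rates are illustrative):

```json
PUT _snapshot/my_backup
{
  "type": "azure",
  "settings": {
    "container": "backup-container",
    "max_restore_bytes_per_sec": "200mb",
    "max_snapshot_bytes_per_sec": "40mb"
  }
}
```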
-Elastic Cloud Enterprise installations support the following {{es}} [snapshot repository types](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html#ess-repo-types):
+Elastic Cloud Enterprise installations support the following {{es}} [snapshot repository types](/deploy-manage/tools/snapshot-and-restore/self-managed.md#ess-repo-types):
-* [Azure](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-azure.html)
-* [Google Cloud Storage](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-gcs.html)
-* [AWS S3](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-s3.html)
+* [Azure](/deploy-manage/tools/snapshot-and-restore/azure-repository.md)
+* [Google Cloud Storage](/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md)
+* [AWS S3](/deploy-manage/tools/snapshot-and-restore/s3-repository.md)
 ::::{note}
 No repository types other than those listed are supported in the Elastic Cloud Enterprise platform, even if they are supported by {{es}}.
@@ -26,7 +26,7 @@ To configure Google Cloud Storage (GCS) as a snapshot repository, you must use [
 To configure Microsoft Azure Storage as a snapshot repository, refer to [Snapshotting to Azure Storage](azure-storage-repository.md).
-For more details about how snapshots are used with Elasticsearch, check [Snapshot and Restore](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html). You can also review the official documentation for these storage repository options:
+For more details about how snapshots are used with Elasticsearch, check [Snapshot and Restore](/deploy-manage/tools/snapshot-and-restore.md). You can also review the official documentation for these storage repository options:
 * [Amazon S3 documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.md)
 * [Microsoft Azure storage documentation](https://docs.microsoft.com/en-us/azure/storage/common/storage-quickstart-create-account)
diff --git a/deploy-manage/tools/snapshot-and-restore/cloud-on-k8s.md b/deploy-manage/tools/snapshot-and-restore/cloud-on-k8s.md
index 3e99b456f9..06d8f1ff59 100644
--- a/deploy-manage/tools/snapshot-and-restore/cloud-on-k8s.md
+++ b/deploy-manage/tools/snapshot-and-restore/cloud-on-k8s.md
@@ -13,7 +13,7 @@ Snapshots are essential for recovering Elasticsearch indices in case of accident
 To set up automated snapshots for Elasticsearch on Kubernetes you have to:
 1. Register the snapshot repository with the Elasticsearch API.
-2. Set up a Snapshot Lifecycle Management Policy through [API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-slm) or the [Kibana UI](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html)
+2. Set up a Snapshot Lifecycle Management Policy through [API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-slm) or the [Kibana UI](/deploy-manage/tools/snapshot-and-restore.md).
 ::::{note}
 Support for S3, GCS and Azure repositories is bundled in Elasticsearch by default from version 8.0. On older versions of Elasticsearch, or if another snapshot repository plugin should be used, you have to [Install a snapshot repository plugin](#k8s-install-plugin).
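For step 1 above (registering the snapshot repository with the Elasticsearch API), a minimal sketch for a GCS repository (bucket and client names are illustrative):

```json
PUT _snapshot/gcs-backups
{
  "type": "gcs",
  "settings": {
    "bucket": "es-snapshots",
    "client": "default"
  }
}
```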
@@ -42,7 +42,7 @@ The final example illustrates how to configure secure and trusted communication
 #### Configure GCS credentials through the Elasticsearch keystore [k8s-secure-settings]
-The Elasticsearch GCS repository plugin requires a JSON file that contains service account credentials. These need to be added as secure settings to the Elasticsearch keystore. For more details, check [Google Cloud Storage Repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-gcs.html).
+The Elasticsearch GCS repository plugin requires a JSON file that contains service account credentials. These need to be added as secure settings to the Elasticsearch keystore. For more details, check [Google Cloud Storage Repository](/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md).
 Using ECK, you can automatically inject secure settings into a cluster node by providing them through a secret in the Elasticsearch Spec.
@@ -306,7 +306,7 @@ Follow the [Azure documentation](https://learn.microsoft.com/en-us/azure/aks/wor
 --sku Standard_ZRS <1>
 ```
- 1. This can be any of the supported storage account types `Standard_LRS`, `Standard_ZRS`, `Standard_GRS`, `Standard_RAGRS` but not `Premium_LRS` see [the Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-azure.html) for details.
+ 1. This can be any of the supported storage account types `Standard_LRS`, `Standard_ZRS`, `Standard_GRS`, or `Standard_RAGRS`, but not `Premium_LRS`. See [the Elasticsearch documentation](/deploy-manage/tools/snapshot-and-restore/azure-repository.md) for details.
 7. Create a container in the storage account, for this example `es-snapshots`.
diff --git a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md
index 0fe0f2b104..168db5d0a8 100644
--- a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md
+++ b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md
@@ -27,7 +27,7 @@ The guide also provides tips for creating dedicated cluster state snapshots and
 * You can only take a snapshot from a running cluster with an elected [master node](../../distributed-architecture/clusters-nodes-shards/node-roles.md#master-node-role).
 * A snapshot repository must be [registered](self-managed.md) and available to the cluster.
-* The cluster’s global metadata must be readable. To include an index in a snapshot, the index and its metadata must also be readable. Ensure there aren’t any [cluster blocks](https://www.elastic.co/guide/en/elasticsearch/reference/current/misc-cluster-settings.html#cluster-read-only) or [index blocks](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html) that prevent read access.
+* The cluster’s global metadata must be readable. To include an index in a snapshot, the index and its metadata must also be readable. Ensure there aren’t any [cluster blocks](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-read-only) or [index blocks](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-block-settings.md) that prevent read access.
 ## Considerations [create-snapshot-considerations]
@@ -37,7 +37,7 @@ The guide also provides tips for creating dedicated cluster state snapshots and
 * Each snapshot is logically independent. You can delete a snapshot without affecting other snapshots.
 * Taking a snapshot can temporarily pause shard allocations. See [Snapshots and shard allocation](../snapshot-and-restore.md#snapshots-shard-allocation).
 * Taking a snapshot doesn’t block indexing or other requests. However, the snapshot won’t include changes made after the snapshot process starts.
-* You can take multiple snapshots at the same time. The [`snapshot.max_concurrent_operations`](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-settings.html#snapshot-max-concurrent-ops) cluster setting limits the maximum number of concurrent snapshot operations.
+* You can take multiple snapshots at the same time. The [`snapshot.max_concurrent_operations`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/snapshot-restore-settings.md#snapshot-max-concurrent-ops) cluster setting limits the maximum number of concurrent snapshot operations.
 * If you include a data stream in a snapshot, the snapshot also includes the stream’s backing indices and metadata. You can also include only specific backing indices in a snapshot. However, the snapshot won’t include the data stream’s metadata or its other backing indices.
@@ -132,7 +132,7 @@ PUT _slm/policy/nightly-snapshots
 ```
 1. When to take snapshots, written in [Cron syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/_schedule_types.html#schedule-cron).
-2. Snapshot name. Supports [date math](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-date-math-index-names). To prevent naming conflicts, the policy also appends a UUID to each snapshot name.
+2. Snapshot name. Supports [date math](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-date-math-index-names). To prevent naming conflicts, the policy also appends a UUID to each snapshot name.
 3. [Registered snapshot repository](self-managed.md) used to store the policy’s snapshots.
 4. Data streams and indices to include in the policy’s snapshots.
 5. If `true`, the policy’s snapshots include the cluster state. This also includes all feature states by default. To only include specific feature states, see [Back up a specific feature state](#back-up-specific-feature-state).
diff --git a/deploy-manage/tools/snapshot-and-restore/ec-aws-custom-repository.md b/deploy-manage/tools/snapshot-and-restore/ec-aws-custom-repository.md
index 5bb7f5da60..bb1792231a 100644
--- a/deploy-manage/tools/snapshot-and-restore/ec-aws-custom-repository.md
+++ b/deploy-manage/tools/snapshot-and-restore/ec-aws-custom-repository.md
@@ -55,7 +55,7 @@ You can use the Elasticsearch Service Keystore to store the credentials to acces
 * `s3.client.secondary.access_key`
 * `s3.client.secondary.secret_key`
-5. Perform a cluster restart to [reload the secure settings](https://www.elastic.co/guide/en/cloud/current/ec-configuring-keystore.html#ec-add-secret-values).
+5. Perform a cluster restart to [reload the secure settings](/deploy-manage/security/secure-settings.md#ec-add-secret-values).
 ## Create the repository [ec-create-aws-repository]
diff --git a/deploy-manage/tools/snapshot-and-restore/ec-gcs-snapshotting.md b/deploy-manage/tools/snapshot-and-restore/ec-gcs-snapshotting.md
index ca986a155a..8a691efe8b 100644
--- a/deploy-manage/tools/snapshot-and-restore/ec-gcs-snapshotting.md
+++ b/deploy-manage/tools/snapshot-and-restore/ec-gcs-snapshotting.md
@@ -14,7 +14,7 @@ You’ll need to have an existing Google Cloud account and have the appropriate
 1. Create a [service account key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) in your Google Cloud project.
- The service account should be configured to have permission to read, write, and list the bucket objects. For more information, refer to [Recommended bucket permission](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-gcs.html#repository-gcs-bucket-permission) in the Elasticsearch docs.
+ The service account should be configured to have permission to read, write, and list the bucket objects. For more information, refer to [Recommended bucket permission](/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md#repository-gcs-bucket-permission) in the Elasticsearch docs.
 2. Save the service account key in JSON file format. You are going to use it later to configure your Elasticsearch deployment for snapshotting.
diff --git a/deploy-manage/tools/snapshot-and-restore/ech-aws-custom-repository.md b/deploy-manage/tools/snapshot-and-restore/ech-aws-custom-repository.md
index f2db5ba595..c16dbcbf10 100644
--- a/deploy-manage/tools/snapshot-and-restore/ech-aws-custom-repository.md
+++ b/deploy-manage/tools/snapshot-and-restore/ech-aws-custom-repository.md
@@ -55,7 +55,7 @@ You can use the Elasticsearch Add-On for Heroku Keystore to store the credential
 * `s3.client.secondary.access_key`
 * `s3.client.secondary.secret_key`
-5. Perform a cluster restart to [reload the secure settings](https://www.elastic.co/guide/en/cloud/current/ec-configuring-keystore.html#ec-add-secret-values).
+5. Perform a cluster restart to [reload the secure settings](/deploy-manage/security/secure-settings.md#ec-add-secret-values).
 ## Create the repository [ech-create-aws-repository]
diff --git a/deploy-manage/tools/snapshot-and-restore/ech-gcs-snapshotting.md b/deploy-manage/tools/snapshot-and-restore/ech-gcs-snapshotting.md
index 2689478436..661811bcf1 100644
--- a/deploy-manage/tools/snapshot-and-restore/ech-gcs-snapshotting.md
+++ b/deploy-manage/tools/snapshot-and-restore/ech-gcs-snapshotting.md
@@ -14,7 +14,7 @@ You’ll need to have an existing Google Cloud account and have the appropriate
 1. Create a [service account key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) in your Google Cloud project.
- The service account should be configured to have permission to read, write, and list the bucket objects. For more information, refer to [Recommended bucket permission](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-gcs.html#repository-gcs-bucket-permission) in the Elasticsearch docs.
+ The service account should be configured to have permission to read, write, and list the bucket objects. For more information, refer to [Recommended bucket permission](/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md#repository-gcs-bucket-permission) in the Elasticsearch docs.
 2. Save the service account key in JSON file format. You are going to use it later to configure your Elasticsearch deployment for snapshotting.
diff --git a/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-gcs-repository.md b/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-gcs-repository.md
index edcb16406e..20faca102f 100644
--- a/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-gcs-repository.md
+++ b/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-gcs-repository.md
@@ -26,7 +26,7 @@ Add your Google Cloud Storage bucket as a repository to the platform:
 1.
[Log into the Cloud UI](../../deploy/cloud-enterprise/log-into-cloud-ui.md). 2. Go to **Platform > Repositories** and add the following snapshot repository configuration under the advanced mode: - Repository GCS (check: [supported settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-gcs.html#repository-gcs-repository)) + Repository GCS (check: [supported settings](/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md#repository-gcs-repository)) ```json { diff --git a/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md b/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md index cd27f5ee31..49c9053cb9 100644 --- a/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md +++ b/deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md @@ -191,7 +191,7 @@ The following settings are supported: : When set to `true` metadata files are stored in compressed format. This setting doesn’t affect index files that are already compressed by default. Defaults to `true`. `max_restore_bytes_per_sec` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). +: (Optional, [byte value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md). `max_snapshot_bytes_per_sec` : (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot creation rate per node. Defaults to `40mb` per second. Note that if the [recovery settings for managed services](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings-for-managed-services) are set, then it defaults to unlimited, and the rate is additionally throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). 
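As a sketch of how the two throttling settings above are applied in practice, the following request registers a GCS repository with both rates set explicitly; the repository and bucket names are hypothetical placeholders:

```json
PUT _snapshot/my_gcs_repository
{
  "type": "gcs",
  "settings": {
    "bucket": "my-snapshot-bucket",
    "max_restore_bytes_per_sec": "200mb",
    "max_snapshot_bytes_per_sec": "40mb"
  }
}
```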
diff --git a/deploy-manage/tools/snapshot-and-restore/manage-snapshot-repositories.md b/deploy-manage/tools/snapshot-and-restore/manage-snapshot-repositories.md index 58a3c61f0a..ad2c8f9d58 100644 --- a/deploy-manage/tools/snapshot-and-restore/manage-snapshot-repositories.md +++ b/deploy-manage/tools/snapshot-and-restore/manage-snapshot-repositories.md @@ -33,7 +33,7 @@ If you manage your own Elasticsearch cluster, you can use the following built-in Other repository types are available through official plugins: -* [Hadoop Distributed File System (HDFS)](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-hdfs.html) +* [Hadoop Distributed File System (HDFS)](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/repository-hdfs.md) ### Elastic Cloud Hosted diff --git a/deploy-manage/tools/snapshot-and-restore/read-only-url-repository.md b/deploy-manage/tools/snapshot-and-restore/read-only-url-repository.md index 61625bdadd..193b2fbf33 100644 --- a/deploy-manage/tools/snapshot-and-restore/read-only-url-repository.md +++ b/deploy-manage/tools/snapshot-and-restore/read-only-url-repository.md @@ -27,7 +27,7 @@ PUT _snapshot/my_read_only_url_repository ## Repository settings [read-only-url-repository-settings] `chunk_size` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum size of files in snapshots. In snapshots, files larger than this are broken down into chunks of this size or smaller. Defaults to `null` (unlimited file size). +: (Optional, [byte value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#byte-units)) Maximum size of files in snapshots. In snapshots, files larger than this are broken down into chunks of this size or smaller. Defaults to `null` (unlimited file size). `http_max_retries` : (Optional, integer) Maximum number of retries for `http` and `https` URLs. Defaults to `5`. @@ -42,7 +42,7 @@ PUT _snapshot/my_read_only_url_repository : (Optional, integer) Maximum number of snapshots the repository can contain. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. `max_restore_bytes_per_sec` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). +: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md). `max_snapshot_bytes_per_sec` : (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot creation rate per node. Defaults to `40mb` per second. Note that if the [recovery settings for managed services](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings-for-managed-services) are set, then it defaults to unlimited, and the rate is additionally throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). 
@@ -56,7 +56,7 @@ PUT _snapshot/my_read_only_url_repository * `https` * `jar` -URLs using the `http`, `https`, or `ftp` protocols must be explicitly allowed with the [`repositories.url.allowed_urls`](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-settings.html#repositories-url-allowed) cluster setting. This setting supports wildcards in the place of a host, path, query, or fragment in the URL. +URLs using the `http`, `https`, or `ftp` protocols must be explicitly allowed with the [`repositories.url.allowed_urls`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/snapshot-restore-settings.md#repositories-url-allowed) cluster setting. This setting supports wildcards in the place of a host, path, query, or fragment in the URL. URLs using the `file` protocol must point to the location of a shared filesystem accessible to all master and data nodes in the cluster. This location must be registered in the `path.repo` setting. You don’t need to register URLs using the `ftp`, `http`, `https`, or `jar` protocols in the `path.repo` setting. diff --git a/deploy-manage/tools/snapshot-and-restore/s3-repository.md b/deploy-manage/tools/snapshot-and-restore/s3-repository.md index 61563dea5d..aed10065c0 100644 --- a/deploy-manage/tools/snapshot-and-restore/s3-repository.md +++ b/deploy-manage/tools/snapshot-and-restore/s3-repository.md @@ -103,7 +103,7 @@ The following list contains the available client settings. Those that must be st : The password to connect to the `proxy.host` with. `read_timeout` -: ([time value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units)) The maximum time {{es}} will wait to receive the next byte of data over an established, open connection to the repository before it closes the connection. The default value is 50 seconds. +: ([time value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units)) The maximum time {{es}} will wait to receive the next byte of data over an established, open connection to the repository before it closes the connection. The default value is 50 seconds. `max_connections` : The maximum number of concurrent connections to S3. The default value is `50`. @@ -175,7 +175,7 @@ The following settings are supported: : When set to `true` metadata files are stored in compressed format. This setting doesn’t affect index files that are already compressed by default. Defaults to `true`. `max_restore_bytes_per_sec` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). +: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md). `max_snapshot_bytes_per_sec` : (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot creation rate per node. Defaults to `40mb` per second. 
Note that if the [recovery settings for managed services](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings-for-managed-services) are set, then it defaults to unlimited, and the rate is additionally throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). @@ -385,7 +385,7 @@ You can perform some basic checks of the suitability of your storage system usin Most storage systems can be configured to log the details of their interaction with {{es}}. If you are investigating a suspected incompatibility with AWS S3, it is usually simplest to collect these logs and provide them to the supplier of your storage system for further analysis. If the incompatibility is not clear from the logs emitted by the storage system, configure {{es}} to log every request it makes to the S3 API by [setting the logging level](../../monitor/logging-configuration/elasticsearch-log4j-configuration-self-managed.md#configuring-logging-levels) of the `com.amazonaws.request` logger to `DEBUG`. -To prevent leaking sensitive information such as credentials and keys in logs, {{es}} rejects configuring this logger at high verbosity unless [insecure network trace logging](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#http-rest-request-tracer) is enabled. To do so, you must explicitly enable it on each node by setting the system property `es.insecure_network_trace_enabled` to `true`. +To prevent leaking sensitive information such as credentials and keys in logs, {{es}} rejects configuring this logger at high verbosity unless [insecure network trace logging](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md#http-rest-request-tracer) is enabled. To do so, you must explicitly enable it on each node by setting the system property `es.insecure_network_trace_enabled` to `true`. Once enabled, you can configure the `com.amazonaws.request` logger: @@ -398,7 +398,7 @@ PUT /_cluster/settings } ``` -Collect the Elasticsearch logs covering the time period of the failed analysis from all nodes in your cluster and share them with the supplier of your storage system along with the analysis response so they can use them to determine the problem. See the [AWS Java SDK](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-../../monitor/logging-configuration/elasticsearch-log4j-configuration-self-managed.md) documentation for further information, including details about other loggers that can be used to obtain even more verbose logs. When you have finished collecting the logs needed by your supplier, set the logger settings back to `null` to return to the default logging configuration and disable insecure network trace logging again. See [Logger](https://www.elastic.co/guide/en/elasticsearch/reference/current/misc-cluster-settings.html#cluster-logger) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) for more information. +Collect the Elasticsearch logs covering the time period of the failed analysis from all nodes in your cluster and share them with the supplier of your storage system along with the analysis response so they can use them to determine the problem. 
See the [AWS Java SDK](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html) documentation for further information, including details about other loggers that can be used to obtain even more verbose logs. When you have finished collecting the logs needed by your supplier, set the logger settings back to `null` to return to the default logging configuration and disable insecure network trace logging again. See [Logger](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-logger) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) for more information. ## Linearizable register implementation [repository-s3-linearizable-registers] diff --git a/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md b/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md index b3fc380e21..1ef7d11b3d 100644 --- a/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md +++ b/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md @@ -18,13 +18,13 @@ By default, {{search-snap}} indices have no replicas. The underlying snapshot pr If a node fails and {{search-snap}} shards need to be recovered elsewhere, there is a brief window of time while {{es}} allocates the shards to other nodes where the cluster health will not be `green`. Searches that hit these shards may fail or return partial results until the shards are reallocated to healthy nodes. -You typically manage {{search-snaps}} through {{ilm-init}}. The [searchable snapshots](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-searchable-snapshot.html) action automatically converts a regular index into a {{search-snap}} index when it reaches the `cold` or `frozen` phase. You can also make indices in existing snapshots searchable by manually mounting them using the [mount snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount) API. +You typically manage {{search-snaps}} through {{ilm-init}}. The [searchable snapshots](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-searchable-snapshot.md) action automatically converts a regular index into a {{search-snap}} index when it reaches the `cold` or `frozen` phase. You can also make indices in existing snapshots searchable by manually mounting them using the [mount snapshot](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount) API. To mount an index from a snapshot that contains multiple indices, we recommend creating a [clone](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone) of the snapshot that contains only the index you want to search, and mounting the clone. You should not delete a snapshot if it has any mounted indices, so creating a clone enables you to manage the lifecycle of the backup snapshot independently of any {{search-snaps}}. If you use {{ilm-init}} to manage your {{search-snaps}} then it will automatically look after cloning the snapshot as needed. You can control the allocation of the shards of {{search-snap}} indices using the same mechanisms as for regular indices.
For example, you could use [Index-level shard allocation filtering](../../distributed-architecture/shard-allocation-relocation-recovery/index-level-shard-allocation.md) to restrict {{search-snap}} shards to a subset of your nodes. -The speed of recovery of a {{search-snap}} index is limited by the repository setting `max_restore_bytes_per_sec` and the node setting `indices.recovery.max_bytes_per_sec` just like a normal restore operation. By default `max_restore_bytes_per_sec` is unlimited, but the default for `indices.recovery.max_bytes_per_sec` depends on the configuration of the node. See [Recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings). +The speed of recovery of a {{search-snap}} index is limited by the repository setting `max_restore_bytes_per_sec` and the node setting `indices.recovery.max_bytes_per_sec` just like a normal restore operation. By default `max_restore_bytes_per_sec` is unlimited, but the default for `indices.recovery.max_bytes_per_sec` depends on the configuration of the node. See [Recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md#recovery-settings). We recommend that you [force-merge](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge) indices to a single segment per shard before taking a snapshot that will be mounted as a {{search-snap}} index. Each read from a snapshot repository takes time and costs money, and the fewer segments there are the fewer reads are needed to restore the snapshot or to respond to a search. @@ -42,7 +42,7 @@ Use any of the following repository types with searchable snapshots: * [AWS S3](s3-repository.md) * [Google Cloud Storage](google-cloud-storage-repository.md) * [Azure Blob Storage](azure-repository.md) -* [Hadoop Distributed File Store (HDFS)](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-hdfs.html) +* [Hadoop Distributed File Store (HDFS)](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/repository-hdfs.md) * [Shared filesystems](shared-file-system-repository.md) such as NFS * [Read-only HTTP and HTTPS repositories](read-only-url-repository.md) @@ -103,7 +103,7 @@ For optimal results, allow {{ilm-init}} to manage snapshots automatically. $$$searchable-snapshots-shared-cache$$$ `xpack.searchable.snapshot.shared_cache.size` -: ([Static](../../deploy/self-managed/configure-elasticsearch.md#static-cluster-setting)) Disk space reserved for the shared cache of partially mounted indices. Accepts a percentage of total disk space or an absolute [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units). Defaults to `90%` of total disk space for dedicated frozen data tier nodes. Otherwise defaults to `0b`. +: ([Static](../../deploy/self-managed/configure-elasticsearch.md#static-cluster-setting)) Disk space reserved for the shared cache of partially mounted indices. Accepts a percentage of total disk space or an absolute [byte value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#byte-units). Defaults to `90%` of total disk space for dedicated frozen data tier nodes. Otherwise defaults to `0b`. 
`xpack.searchable.snapshot.shared_cache.size.max_headroom` : ([Static](../../deploy/self-managed/configure-elasticsearch.md#static-cluster-setting), [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) For dedicated frozen tier nodes, the max headroom to maintain. If `xpack.searchable.snapshot.shared_cache.size` is not explicitly set, this setting defaults to `100GB`. Otherwise it defaults to `-1` (not set). You can only configure this setting if `xpack.searchable.snapshot.shared_cache.size` is set as a percentage. diff --git a/deploy-manage/tools/snapshot-and-restore/self-managed.md b/deploy-manage/tools/snapshot-and-restore/self-managed.md index 984031e170..331b44d8d7 100644 --- a/deploy-manage/tools/snapshot-and-restore/self-managed.md +++ b/deploy-manage/tools/snapshot-and-restore/self-managed.md @@ -25,7 +25,7 @@ In this guide, you’ll learn how to: * [Cluster privileges](../../users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md#privileges-list-cluster): `monitor`, `manage_slm`, `cluster:admin/snapshot`, and `cluster:admin/repository` * [Index privilege](../../users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md#privileges-list-indices): `all` on the `monitor` index -* To register a snapshot repository, the cluster’s global metadata must be writeable. Ensure there aren’t any [cluster blocks](https://www.elastic.co/guide/en/elasticsearch/reference/current/misc-cluster-settings.html#cluster-read-only) that prevent write access. +* To register a snapshot repository, the cluster’s global metadata must be writeable. Ensure there aren’t any [cluster blocks](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md#cluster-read-only) that prevent write access. ## Considerations [snapshot-repo-considerations] @@ -90,7 +90,7 @@ If you manage your own {{es}} cluster, you can use the following built-in snapsh $$$snapshots-repository-plugins$$$ Other repository types are available through official plugins: -* [Hadoop Distributed File System (HDFS)](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-hdfs.html) +* [Hadoop Distributed File System (HDFS)](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/repository-hdfs.md) You can also use alternative storage implementations with these repository types, as long as the alternative implementation is fully compatible. For instance, [MinIO](https://minio.io) provides an alternative implementation of the AWS S3 API and you can use MinIO with the [`s3` repository type](s3-repository.md). diff --git a/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md b/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md index 3cf7f392a9..ae12991617 100644 --- a/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md +++ b/deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md @@ -136,7 +136,7 @@ PUT _snapshot/my_fs_backup ## Repository settings [filesystem-repository-settings] `chunk_size` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum size of files in snapshots. In snapshots, files larger than this are broken down into chunks of this size or smaller. Defaults to `null` (unlimited file size). 
+: (Optional, [byte value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#byte-units)) Maximum size of files in snapshots. In snapshots, files larger than this are broken down into chunks of this size or smaller. Defaults to `null` (unlimited file size). `compress` : (Optional, Boolean) If `true`, metadata files, such as index mappings and settings, are compressed in snapshots. Data files are not compressed. Defaults to `true`. @@ -148,7 +148,7 @@ PUT _snapshot/my_fs_backup : (Optional, integer) Maximum number of snapshots the repository can contain. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. `max_restore_bytes_per_sec` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). +: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md). `max_snapshot_bytes_per_sec` : (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot creation rate per node. Defaults to `40mb` per second. Note that if the [recovery settings for managed services](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings-for-managed-services) are set, then it defaults to unlimited, and the rate is additionally throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). diff --git a/deploy-manage/tools/snapshot-and-restore/source-only-repository.md b/deploy-manage/tools/snapshot-and-restore/source-only-repository.md index e94da6100f..5b1f4d69b7 100644 --- a/deploy-manage/tools/snapshot-and-restore/source-only-repository.md +++ b/deploy-manage/tools/snapshot-and-restore/source-only-repository.md @@ -38,7 +38,7 @@ PUT _snapshot/my_src_only_repository ## Repository settings [source-only-repository-settings] `chunk_size` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum size of files in snapshots. In snapshots, files larger than this are broken down into chunks of this size or smaller. Defaults to `null` (unlimited file size). +: (Optional, [byte value](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#byte-units)) Maximum size of files in snapshots. In snapshots, files larger than this are broken down into chunks of this size or smaller. Defaults to `null` (unlimited file size). `compress` : (Optional, Boolean) If `true`, metadata files, such as index mappings and settings, are compressed in snapshots. Data files are not compressed. Defaults to `true`. @@ -53,7 +53,7 @@ PUT _snapshot/my_src_only_repository : (Optional, integer) Maximum number of snapshots the repository can contain. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. 
`max_restore_bytes_per_sec` -: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). +: (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot restore rate per node. Defaults to unlimited. Note that restores are also throttled through [recovery settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md). `max_snapshot_bytes_per_sec` : (Optional, [byte value](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units)) Maximum snapshot creation rate per node. Defaults to `40mb` per second. Note that if the [recovery settings for managed services](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings-for-managed-services) are set, then it defaults to unlimited, and the rate is additionally throttled through [recovery settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html). diff --git a/deploy-manage/upgrade/deployment-or-cluster/reading-indices-from-older-elasticsearch-versions.md b/deploy-manage/upgrade/deployment-or-cluster/reading-indices-from-older-elasticsearch-versions.md index 0c08ec28aa..4f5fa6dd55 100644 --- a/deploy-manage/upgrade/deployment-or-cluster/reading-indices-from-older-elasticsearch-versions.md +++ b/deploy-manage/upgrade/deployment-or-cluster/reading-indices-from-older-elasticsearch-versions.md @@ -16,23 +16,23 @@ For this, {{es}} has the ability to access older snapshot repositories (going ba Old mappings are imported as much "as-is" as possible into {{es}} 8, but only provide regular query capabilities on a select subset of fields: -* [Numeric types](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) -* [`boolean` type](https://www.elastic.co/guide/en/elasticsearch/reference/current/boolean.html) -* [`ip` type](https://www.elastic.co/guide/en/elasticsearch/reference/current/ip.html) -* [`geo_point` type](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) -* [`date` types](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html): the date `format` setting on date fields is supported as long as it behaves similarly across these versions. In case it is not, for example [when using custom date formats](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/migrate-to-java-time.html), this field can be updated on legacy indices so that it can be changed by a user if need be. -* [`keyword` type](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#keyword-field-type): the `normalizer` setting on keyword fields is supported as long as it behaves similarly across these versions. In case it is not, this field can be updated on legacy indices if need be. -* [`text` type](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html#text-field-type): scoring capabilities are limited, and all queries return constant scores that are equal to 1.0. The `analyzer` settings on text fields are supported as long as they behave similarly across these versions. In case they do not, they can be updated on legacy indices if need be. 
-* [Multi-fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html) -* [Field aliases](https://www.elastic.co/guide/en/elasticsearch/reference/current/field-alias.html) -* [`object`](https://www.elastic.co/guide/en/elasticsearch/reference/current/object.html) fields +* [Numeric types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) +* [`boolean` type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/boolean.md) +* [`ip` type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/ip.md) +* [`geo_point` type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) +* [`date` types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md): the date `format` setting on date fields is supported as long as it behaves similarly across these versions. In case it is not, for example [when using custom date formats](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/migrate-to-java-time.html), this field can be updated on legacy indices so that it can be changed by a user if need be. +* [`keyword` type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md#keyword-field-type): the `normalizer` setting on keyword fields is supported as long as it behaves similarly across these versions. In case it is not, this field can be updated on legacy indices if need be. +* [`text` type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md#text-field-type): scoring capabilities are limited, and all queries return constant scores that are equal to 1.0. The `analyzer` settings on text fields are supported as long as they behave similarly across these versions. In case they do not, they can be updated on legacy indices if need be. +* [Multi-fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md) +* [Field aliases](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-alias.md) +* [`object`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/object.md) fields * some basic metadata fields, e.g. `_type` for querying {{es}} 5 indices * [runtime fields](../../../manage-data/data-store/mapping/map-runtime-field.md) -* [`_source` field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html) +* [`_source` field](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md) {{es}} 5 indices with mappings that have [multiple mapping types](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/removal-of-types.html) are collapsed together on a best-effort basis before they are imported. -In case the auto-import of mappings does not work, or the new {{es}} version can’t make sense of the mapping, it falls back to importing the index without the mapping, but stores the original mapping in the [_meta](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html) section of the imported index. 
The legacy mapping can then be introspected using the [GET mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) API and an updated mapping can be manually put in place using the [update mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) API, copying and adapting relevant sections of the legacy mapping to work with the current {{es}} version. While auto-import is expected to work in most cases, failures of doing so should be [raised](https://github.com/elastic/elasticsearch/issues/new/choose) with the Elastic team for future improvements. +In case the auto-import of mappings does not work, or the new {{es}} version can’t make sense of the mapping, it falls back to importing the index without the mapping, but stores the original mapping in the [_meta](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-meta-field.md) section of the imported index. The legacy mapping can then be introspected using the [GET mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) API and an updated mapping can be manually put in place using the [update mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) API, copying and adapting relevant sections of the legacy mapping to work with the current {{es}} version. While auto-import is expected to work in most cases, any failures should be [raised](https://github.com/elastic/elasticsearch/issues/new/choose) with the Elastic team for future improvements. ## Supported APIs [_supported_apis] diff --git a/deploy-manage/upgrade/orchestrator/upgrade-cloud-on-k8s.md b/deploy-manage/upgrade/orchestrator/upgrade-cloud-on-k8s.md index 71b91a0e05..66195cb802 100644 --- a/deploy-manage/upgrade/orchestrator/upgrade-cloud-on-k8s.md +++ b/deploy-manage/upgrade/orchestrator/upgrade-cloud-on-k8s.md @@ -66,7 +66,7 @@ If you are using ECK through an OLM-managed distribution channel like [operatorh ### Upgrading from ECK 1.9 or earlier [k8s_upgrading_from_eck_1_9_or_earlier] -Operator Lifecycle Manager (OLM) and OpenShift OperatorHub users that run with automatic upgrades enabled, are advised to set the `set-default-security-context` [operator flag](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-operator-config.html) explicitly before upgrading to ECK 2.0 or later. If not set, ECK can fail to [auto-detect](https://github.com/elastic/cloud-on-k8s/issues/5061) the correct security context configuration and Elasticsearch Pods may not be allowed to run. +Operator Lifecycle Manager (OLM) and OpenShift OperatorHub users that run with automatic upgrades enabled are advised to set the `set-default-security-context` [operator flag](/deploy-manage/deploy/cloud-on-k8s/configure-eck.md) explicitly before upgrading to ECK 2.0 or later. If not set, ECK can fail to [auto-detect](https://github.com/elastic/cloud-on-k8s/issues/5061) the correct security context configuration and Elasticsearch Pods may not be allowed to run.
### Upgrading from ECK 2.0 or later [k8s_upgrading_from_eck_2_0_or_later] diff --git a/deploy-manage/upgrade/prepare-to-upgrade/index-compatibility.md b/deploy-manage/upgrade/prepare-to-upgrade/index-compatibility.md index 7d8de2c693..610dd15a6d 100644 --- a/deploy-manage/upgrade/prepare-to-upgrade/index-compatibility.md +++ b/deploy-manage/upgrade/prepare-to-upgrade/index-compatibility.md @@ -29,7 +29,7 @@ To upgrade to 9.0.0-beta1 from 7.16 or an earlier version, **you must first upgr ## REST API compatibility [upgrade-rest-api-compatibility] -[REST API compatibility](https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-api-compatibility.html) is a per-request opt-in feature that can help REST clients mitigate non-compatible (breaking) changes to the REST API. +[REST API compatibility](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/compatibility.md) is a per-request opt-in feature that can help REST clients mitigate non-compatible (breaking) changes to the REST API. ## FIPS Compliance and Java 17 [upgrade-fips-java17] diff --git a/deploy-manage/users-roles/cloud-enterprise-orchestrator/saml.md b/deploy-manage/users-roles/cloud-enterprise-orchestrator/saml.md index 49a435a383..b2ce58d45b 100644 --- a/deploy-manage/users-roles/cloud-enterprise-orchestrator/saml.md +++ b/deploy-manage/users-roles/cloud-enterprise-orchestrator/saml.md @@ -50,7 +50,7 @@ $$$ece-saml-general-settings$$$Begin the provider profile by adding the general ## Map SAML attributes to User Properties [ece-saml-attributes] -The SAML assertion about a user usually includes attribute names and values that can be used for role mapping. The configuration in this section allows to configure a mapping between these SAML attribute values and [Elasticsearch user properties](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-elasticsearch-authentication). When the attributes have been mapped to user properties such as `groups`, these can then be used to configure [role mappings](#ece-saml-role-mapping). Mapping the `principal` user property is required and the `groups` property is recommended for a minimum configuration. +The SAML assertion about a user usually includes attribute names and values that can be used for role mapping. The configuration in this section allows you to configure a mapping between these SAML attribute values and [Elasticsearch user properties](/deploy-manage/users-roles/cluster-or-deployment-auth/saml.md#saml-elasticsearch-authentication). When the attributes have been mapped to user properties such as `groups`, these can then be used to configure [role mappings](#ece-saml-role-mapping). Mapping the `principal` user property is required and the `groups` property is recommended for a minimum configuration. Note that some additional attention must be paid to the `principal` user property. Although the SAML specification does not have many restrictions on the type of value that is mapped, ECE requires that the mapped value is also a valid Elasticsearch native realm identifier. Specifically, this means the mapped identifier should not contain any commas or slashes, and should be otherwise URL friendly.
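To make the flow from mapped user properties to roles concrete, here is a minimal sketch of the kind of role mapping the `groups` property feeds into; note that in ECE the equivalent mapping is defined through the role mappings UI rather than this API, and the mapping name, group value, and role shown are hypothetical:

```json
PUT /_security/role_mapping/saml-admins
{
  "roles": [ "superuser" ],
  "enabled": true,
  "rules": {
    "field": { "groups": "elastic-admins" }
  }
}
```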
diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md b/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md index 867972e308..11af1c2098 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md @@ -17,7 +17,7 @@ PKI and JWT realms do not cache user credentials, but do cache the resolved user :::: -The cached user credentials are hashed in memory. By default, the {{es}} {{security-features}} use a salted `sha-256` hash algorithm. You can use a different hashing algorithm by setting the `cache.hash_algo` realm settings. See [User cache and password hash algorithms](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#hashing-settings). +The cached user credentials are hashed in memory. By default, the {{es}} {{security-features}} use a salted `sha-256` hash algorithm. You can use a different hashing algorithm by setting the `cache.hash_algo` realm settings. See [User cache and password hash algorithms](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#hashing-settings). ## Evicting users from the cache [cache-eviction-api] diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/looking-up-users-without-authentication.md b/deploy-manage/users-roles/cluster-or-deployment-auth/looking-up-users-without-authentication.md index 61e7634cb8..3f6b1e9dfb 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/looking-up-users-without-authentication.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/looking-up-users-without-authentication.md @@ -22,7 +22,7 @@ See the [run_as](submitting-requests-on-behalf-of-other-users.md) and [delegated * The reserved, [`native`](native.md) and [`file`](file-based.md) realms always support user lookup. * The [`ldap`](ldap.md) realm supports user lookup when the realm is configured in [*user search* mode](ldap.md#ldap-realm-configuration). User lookup is not supported when the realm is configured with `user_dn_templates`. -* User lookup support in the [`active_directory`](active-directory.md) realm requires that the realm be configured with a [`bind_dn`](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ref-ad-settings) and a bind password. +* User lookup support in the [`active_directory`](active-directory.md) realm requires that the realm be configured with a [`bind_dn`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#ref-ad-settings) and a bind password. The `pki`, `saml`, `oidc`, `kerberos` and `jwt` realms do not support user lookup. diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/manage-authentication-for-multiple-clusters.md b/deploy-manage/users-roles/cluster-or-deployment-auth/manage-authentication-for-multiple-clusters.md index f682cc4197..962502b507 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/manage-authentication-for-multiple-clusters.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/manage-authentication-for-multiple-clusters.md @@ -36,13 +36,13 @@ This requires a valid Enterprise license or Enterprise trial license. Check [the ::::{tip} -Make sure you check the complete [guide to setting up LDAP with Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/ldap-realm.html).
+Make sure you check the complete [guide to setting up LDAP with Elasticsearch](/deploy-manage/users-roles/cluster-or-deployment-auth/ldap.md). :::: ### To configure LDAP using Elastic Stack configuration policy with user search: [k8s_to_configure_ldap_using_elastic_stack_configuration_policy_with_user_search] -1. Add a realm configuration to the `config` field under `elasticsearch` in the `xpack.security.authc.realms.ldap` namespace. At a minimum, you must specify the URL of the LDAP server and the order of the LDAP realm compared to other configured security realms. You also have to set `user_search.base_dn` to the container DN where the users are searched for. Refer to [LDAP realm settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ref-ldap-settings) for all of the options you can set for an LDAP realm. For example, the following snippet shows an LDAP realm configured with a user search: +1. Add a realm configuration to the `config` field under `elasticsearch` in the `xpack.security.authc.realms.ldap` namespace. At a minimum, you must specify the URL of the LDAP server and the order of the LDAP realm compared to other configured security realms. You also have to set `user_search.base_dn` to the container DN where the users are searched for. Refer to [LDAP realm settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#ref-ldap-settings) for all of the options you can set for an LDAP realm. For example, the following snippet shows an LDAP realm configured with a user search: ```yaml elasticsearch: @@ -205,7 +205,7 @@ This requires a valid Enterprise license or Enterprise trial license. Check [the ::::{tip} -Make sure you check the complete [guide to setting up OpenID Connect with Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html). +Make sure you check the complete [guide to setting up OpenID Connect with Elasticsearch](/deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md). :::: @@ -386,7 +386,7 @@ This requires a valid Enterprise license or Enterprise trial license. Check [the ::::{tip} -Make sure you check the complete [guide to setting up JWT with Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/jwt-auth-realm.html). +Make sure you check the complete [guide to setting up JWT with Elasticsearch](/deploy-manage/users-roles/cluster-or-deployment-auth/jwt.md). :::: diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/operator-only-functionality.md b/deploy-manage/users-roles/cluster-or-deployment-auth/operator-only-functionality.md index 5ccbeb1923..816a814f08 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/operator-only-functionality.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/operator-only-functionality.md @@ -29,7 +29,7 @@ Operator privileges provide protection for APIs and dynamic cluster settings. 
An ## Operator-only dynamic cluster settings [operator-only-dynamic-cluster-settings] * All [IP filtering](../../security/ip-traffic-filtering.md) settings -* The following dynamic [machine learning settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html): +* The following dynamic [machine learning settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md): * `xpack.ml.node_concurrent_job_allocations` * `xpack.ml.max_machine_memory_percent` @@ -41,8 +41,8 @@ Operator privileges provide protection for APIs and dynamic cluster settings. An * `xpack.ml.enable_config_migration` * `xpack.ml.persist_results_max_retries` -* The [`cluster.routing.allocation.disk.threshold_enabled` setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#cluster-routing-disk-threshold) -* The following [recovery settings for managed services](https://www.elastic.co/guide/en/elasticsearch/reference/current/recovery.html#recovery-settings-for-managed-services): +* The [`cluster.routing.allocation.disk.threshold_enabled` setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md#cluster-routing-disk-threshold) +* The following [recovery settings for managed services](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-recovery-settings.md#recovery-settings-for-managed-services): * `node.bandwidth.recovery.operator.factor` * `node.bandwidth.recovery.operator.factor.read` diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/pki.md b/deploy-manage/users-roles/cluster-or-deployment-auth/pki.md index 5c537ae6dc..bcd11daf02 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/pki.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/pki.md @@ -13,7 +13,7 @@ You can also use PKI certificates to authenticate to {{kib}}, however this requi To use PKI in {{es}}, you configure a PKI realm, enable client authentication on the desired network layers (transport or http), and map the Distinguished Names (DNs) from the Subject field in the user certificates to roles. You create the mappings in a role mapping file or use the role mappings API. -1. Add a realm configuration for a `pki` realm to `elasticsearch.yml` under the `xpack.security.authc.realms.pki` namespace. You must explicitly set the `order` attribute. See [PKI realm settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ref-pki-settings) for all of the options you can set for a `pki` realm. +1. Add a realm configuration for a `pki` realm to `elasticsearch.yml` under the `xpack.security.authc.realms.pki` namespace. You must explicitly set the `order` attribute. See [PKI realm settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#ref-pki-settings) for all of the options you can set for a `pki` realm. 
For example, the following snippet shows the most basic `pki` realm configuration: diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md b/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md index eef6143b7c..39eefac7d2 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md @@ -42,7 +42,7 @@ xpack.security.authc.realms: enabled: false ``` -As can be seen above, each realm has a unique name that identifies it. Each type of realm dictates its own set of required and optional settings. That said, there are [settings that are common to all realms](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ref-realm-settings). +As can be seen above, each realm has a unique name that identifies it. Each type of realm dictates its own set of required and optional settings. That said, there are [settings that are common to all realms](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#ref-realm-settings). ## Delegating authorization to another realm [authorization_realms] diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts.md b/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts.md index 5c8742aff2..0686dc8f3d 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts.md @@ -47,7 +47,7 @@ Service tokens can be backed by either the `.security` index (recommended) or th You must create a service token to use a service account. You can create a service token using either: * The [create service account token API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token), which saves the new service token in the `.security` index and returns the bearer token in the HTTP response. -* The [elasticsearch-service-tokens](https://www.elastic.co/guide/en/elasticsearch/reference/current/service-tokens-command.html) CLI tool, which saves the new service token in the `$ES_HOME/config/service_tokens` file and outputs the bearer token to your terminal +* The [elasticsearch-service-tokens](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/service-tokens-command.md) CLI tool, which saves the new service token in the `$ES_HOME/config/service_tokens` file and outputs the bearer token to your terminal. We recommend that you create service tokens via the REST API rather than the CLI. The API stores service tokens within the `.security` index which means that the tokens are available for authentication on all nodes, and will be backed up within cluster snapshots. The use of the CLI is intended for cases where there is an external orchestration process (such as [{{ece}}](https://www.elastic.co/guide/en/cloud-enterprise/current) or [{{eck}}](https://www.elastic.co/guide/en/cloud-on-k8s/current)) that will manage the creation and distribution of the `service_tokens` file.
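As a minimal sketch of the recommended API route, the following request creates a token for the built-in `elastic/fleet-server` service account; the token name `my-token` is a hypothetical placeholder:

```json
POST /_security/service/elastic/fleet-server/credential/token/my-token
```

The response includes the generated bearer token value, which is then presented in an `Authorization: Bearer` header as described below.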
diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/token-based-authentication-services.md b/deploy-manage/users-roles/cluster-or-deployment-auth/token-based-authentication-services.md index 1074128244..a998a6dfea 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/token-based-authentication-services.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/token-based-authentication-services.md @@ -10,7 +10,7 @@ The {{stack-security-features}} authenticate users by using realms and one or mo The {{security-features}} provide the following built-in token-based authentication services, which are listed in the order they are consulted: *service-accounts* -: The [service accounts](service-accounts.md) use either the [create service account token API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token) or the [elasticsearch-service-tokens](https://www.elastic.co/guide/en/elasticsearch/reference/current/service-tokens-command.html) CLI tool to generate service account tokens. +: The [service accounts](service-accounts.md) use either the [create service account token API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token) or the [elasticsearch-service-tokens](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/service-tokens-command.md) CLI tool to generate service account tokens. To use a service account token, include the generated token value in a request with an `Authorization: Bearer` header: diff --git a/explore-analyze/alerts-cases/alerts/alerting-common-issues.md b/explore-analyze/alerts-cases/alerts/alerting-common-issues.md index 0db738b832..1b2d473f46 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-common-issues.md +++ b/explore-analyze/alerts-cases/alerts/alerting-common-issues.md @@ -18,7 +18,7 @@ Rules with a small check interval, such as every two seconds, run later than sch **Solution** -Rules run as background tasks at a cadence defined by their **check interval**. When a Rule **check interval** is smaller than the Task Manager [`poll_interval`](https://www.elastic.co/guide/en/kibana/current/task-manager-settings-kb.html#task-manager-settings), the rule will run late. +Rules run as background tasks at a cadence defined by their **check interval**. When a Rule **check interval** is smaller than the Task Manager [`poll_interval`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/task-manager-settings.md#task-manager-settings), the rule will run late. Either tweak the [{{kib}} Task Manager settings](https://www.elastic.co/guide/en/kibana/current/task-manager-settings-kb.html#task-manager-settings) or increase the **check interval** of the rules in question. @@ -36,7 +36,7 @@ Actions run long after the status of a rule changes, sending a notification of t Rules and actions run as background tasks by each {{kib}} instance at a default rate of ten tasks every three seconds. When diagnosing issues related to alerting, focus on the tasks that begin with `alerting:` and `actions:`. -Alerting tasks always begin with `alerting:`. For example, the `alerting:.index-threshold` tasks back the [index threshold stack rule](rule-type-index-threshold.md). Action tasks always begin with `actions:`. For example, the `actions:.index` tasks back the [index action](https://www.elastic.co/guide/en/kibana/current/index-action-type.html). +Alerting tasks always begin with `alerting:`. 
For example, the `alerting:.index-threshold` tasks back the [index threshold stack rule](rule-type-index-threshold.md). Action tasks always begin with `actions:`. For example, the `actions:.index` tasks back the [index action](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/index-action-type.md). For more details on monitoring and diagnosing tasks in Task Manager, refer to [Health monitoring](../../../deploy-manage/monitor/kibana-task-manager-health-monitoring.md). @@ -48,7 +48,7 @@ A connector gets a TLS socket error when connecting to the server to run an acti **Solution** -Configuration options are available to specialize connections to TLS servers, including ignoring server certificate validation and providing certificate authority data to verify servers using custom certificates. For more details, see [Action settings](https://www.elastic.co/guide/en/kibana/current/alert-action-settings-kb.html#action-settings). +Configuration options are available to specialize connections to TLS servers, including ignoring server certificate validation and providing certificate authority data to verify servers using custom certificates. For more details, see [Action settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/alerting-settings.md#action-settings). ## Rules take a long time to run [rules-long-run-time] @@ -243,7 +243,7 @@ This error happens when the `xpack.encryptedSavedObjects.encryptionKey` value us | | | | --- | --- | -| If the value in `xpack.encryptedSavedObjects.encryptionKey` was manually changed, and the previous encryption key is still known. | Ensure any previous encryption key is included in the keys used for [decryption only](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#xpack-encryptedSavedObjects-keyRotation-decryptionOnlyKeys). | +| If the value in `xpack.encryptedSavedObjects.encryptionKey` was manually changed, and the previous encryption key is still known. | Ensure any previous encryption key is included in the keys used for [decryption only](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md#xpack-encryptedSavedObjects-keyRotation-decryptionOnlyKeys). | | If another {{kib}} instance with a different encryption key connects to the cluster. | The other {{kib}} instance might be trying to run the rule using a different encryption key than what the rule was created with. Ensure the encryption keys among all the {{kib}} instances are the same, and setting [decryption only keys](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#xpack-encryptedSavedObjects-keyRotation-decryptionOnlyKeys) for previously used encryption keys. | | If other scenarios don’t apply. | Generate a new API key for the rule. For example, in **{{stack-manage-app}} > {{rules-ui}}**, select **Update API key** from the action menu. | diff --git a/explore-analyze/alerts-cases/alerts/alerting-getting-started.md b/explore-analyze/alerts-cases/alerts/alerting-getting-started.md index b097e508d1..c705effc0e 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-getting-started.md +++ b/explore-analyze/alerts-cases/alerts/alerting-getting-started.md @@ -7,7 +7,7 @@ navigation_title: Getting started with alerts # Getting started with alerting [alerting-getting-started] -Alerting enables you to define *rules*, which detect complex conditions within different {{kib}} apps and trigger actions when those conditions are met. 
Alerting is integrated with [**{{observability}}**](../../../solutions/observability/incident-management/alerting.md), [**Security**](https://www.elastic.co/guide/en/security/current/prebuilt-rules.html), [**Maps**](../../../explore-analyze/alerts-cases/alerts/geo-alerting.md) and [**{{ml-app}}**](../../../explore-analyze/machine-learning/anomaly-detection/ml-configuring-alerts.md). It can be centrally managed from **{{stack-manage-app}}** and provides a set of built-in [connectors](../../../deploy-manage/manage-connectors.md) and [rules](../../../explore-analyze/alerts-cases/alerts/rule-types.md#stack-rules) for you to use. +Alerting enables you to define *rules*, which detect complex conditions within different {{kib}} apps and trigger actions when those conditions are met. Alerting is integrated with [**{{observability}}**](../../../solutions/observability/incident-management/alerting.md), [**Security**](asciidocalypse://docs/docs-content/docs/reference/security/prebuilt-rules.md), [**Maps**](../../../explore-analyze/alerts-cases/alerts/geo-alerting.md) and [**{{ml-app}}**](../../../explore-analyze/machine-learning/anomaly-detection/ml-configuring-alerts.md). It can be centrally managed from **{{stack-manage-app}}** and provides a set of built-in [connectors](../../../deploy-manage/manage-connectors.md) and [rules](../../../explore-analyze/alerts-cases/alerts/rule-types.md#stack-rules) for you to use. :::{image} ../../../images/kibana-alerting-overview.png :alt: {{rules-ui}} UI diff --git a/explore-analyze/alerts-cases/alerts/alerting-setup.md b/explore-analyze/alerts-cases/alerts/alerting-setup.md index 7b9cf3f67a..c190265265 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-setup.md +++ b/explore-analyze/alerts-cases/alerts/alerting-setup.md @@ -15,14 +15,14 @@ mapped_pages: If you are using an **on-premises** {{stack}} deployment: -* In the `kibana.yml` configuration file, add the [`xpack.encryptedSavedObjects.encryptionKey`](https://www.elastic.co/guide/en/kibana/current/alert-action-settings-kb.html#general-alert-action-settings) setting. +* In the `kibana.yml` configuration file, add the [`xpack.encryptedSavedObjects.encryptionKey`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/alerting-settings.md#general-alert-action-settings) setting. * For emails to have a footer with a link back to {{kib}}, set the [`server.publicBaseUrl`](../../../deploy-manage/deploy/self-managed/configure.md#server-publicBaseUrl) configuration setting. If you are using an **on-premises** {{stack}} deployment with [**security**](../../../deploy-manage/security.md): -* If you are unable to access {{kib}} {{alert-features}}, ensure that you have not [explicitly disabled API keys](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings). +* If you are unable to access {{kib}} {{alert-features}}, ensure that you have not [explicitly disabled API keys](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/security-settings.md#api-key-service-settings). -The alerting framework uses queries that require the `search.allow_expensive_queries` setting to be `true`. See the scripts [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html#_allow_expensive_queries_4). +The alerting framework uses queries that require the `search.allow_expensive_queries` setting to be `true`. 
See the scripts [documentation](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-script-query.md#_allow_expensive_queries_4). ## Production considerations and scaling guidance [alerting-setup-production] @@ -47,7 +47,7 @@ The **{{connectors-feature}}** feature privilege is required to manage connector Likewise, you can customize the **Rules Settings** sub-feature privileges related to flapping detection settings. -To create a rule that uses the [Cases connector](https://www.elastic.co/guide/en/kibana/current/cases-action-type.html), you must also have `All` privileges for the **Cases** feature. +To create a rule that uses the [Cases connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/cases-action-type.md), you must also have `All` privileges for the **Cases** feature. The rule type also affects the privileges that are required. For example, to create or edit {{ml}} rules, you must have `all` privileges for the **Analytics > {{ml-app}}** feature. For {{stack-monitor-app}} rules, you must have the `monitoring_user` role. For {{observability}} rules, you must have `all` privileges for the appropriate {{observability}} features. For Security rules, refer to [Detections prerequisites and requirements](../../../solutions/security/detect-and-alert/detections-requirements.md). diff --git a/explore-analyze/alerts-cases/alerts/alerting-troubleshooting.md b/explore-analyze/alerts-cases/alerts/alerting-troubleshooting.md index 15fed0550e..c983214c2b 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-troubleshooting.md +++ b/explore-analyze/alerts-cases/alerts/alerting-troubleshooting.md @@ -177,7 +177,7 @@ In addition to the above methods, refer to the following approaches and common i ### Temporarily throttle all tasks [alerting-kibana-throttle] -If cluster performance becomes degraded from excessive or expensive rules and {{kib}} is sluggish or unresponsive, you can temporarily reduce load to the Task Manager by updating its [settings](https://www.elastic.co/guide/en/kibana/current/task-manager-settings-kb.html): +If cluster performance becomes degraded from excessive or expensive rules and {{kib}} is sluggish or unresponsive, you can temporarily reduce load to the Task Manager by updating its [settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/task-manager-settings.md): ```txt xpack.task_manager.capacity: 5 diff --git a/explore-analyze/alerts-cases/alerts/create-manage-rules.md b/explore-analyze/alerts-cases/alerts/create-manage-rules.md index 247ccd14f8..1479f5e0d6 100644 --- a/explore-analyze/alerts-cases/alerts/create-manage-rules.md +++ b/explore-analyze/alerts-cases/alerts/create-manage-rules.md @@ -8,7 +8,7 @@ mapped_pages: # Create and manage rules [create-and-manage-rules] -The **{{stack-manage-app}}** > **{{rules-ui}}** UI provides a cross-app view of alerting. Different {{kib}} apps like [**{{observability}}**](../../../solutions/observability/incident-management/alerting.md), [**Security**](https://www.elastic.co/guide/en/security/current/prebuilt-rules.html), [**Maps**](geo-alerting.md) and [**{{ml-app}}**](../../machine-learning/machine-learning-in-kibana.md) can offer their own rules. +The **{{stack-manage-app}}** > **{{rules-ui}}** UI provides a cross-app view of alerting. 
Different {{kib}} apps like [**{{observability}}**](../../../solutions/observability/incident-management/alerting.md), [**Security**](asciidocalypse://docs/docs-content/docs/reference/security/prebuilt-rules.md), [**Maps**](geo-alerting.md) and [**{{ml-app}}**](../../machine-learning/machine-learning-in-kibana.md) can offer their own rules.

You can find **Rules** in **Stack Management** > **Alerts and insights** > **Rules** in {{kib}} or by using the [global search field](/explore-analyze/find-and-organize/find-apps-and-objects.md).

@@ -58,7 +58,7 @@ You can add one or more actions to your rule to generate notifications when its

Each action uses a connector, which provides connection information for a {{kib}} service or third party integration, depending on where you want to send the notifications.

-[preview] Some connectors that perform actions within {{kib}}, such as the [Cases connector](https://www.elastic.co/guide/en/kibana/current/cases-action-type.html), require less configuration. For example, you do not need to set the action frequency or variables.
+[preview] Some connectors that perform actions within {{kib}}, such as the [Cases connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/cases-action-type.md), require less configuration. For example, you do not need to set the action frequency or variables.

After you select a connector, set the action frequency. You can choose to create a summary of alerts on each check interval or on a custom interval. Alternatively, you can choose to run actions for each alert (at each check interval, only when the alert status changes, or at a custom interval).

@@ -150,7 +150,7 @@ Click the rule name to access a rule details page:

In this example, the rule detects when a site serves more than a threshold number of bytes in a 24 hour period. Four sites are above the threshold. These are called alerts - occurrences of the condition being detected - and the alert name, status, time of detection, and duration of the condition are shown in this view. Alerts come and go from the list depending on whether the rule conditions are met. For more information about alerts, go to [*View alerts*](view-alerts.md).

-If there are rule actions that failed to run successfully, you can see the details on the **History** tab. In the **Message** column, click the warning or expand icon ![double arrow icon to open a flyout with the document details](../../../images/kibana-expand-icon-2.png "") or click the number in the **Errored actions** column to open the **Errored Actions** panel. In this example, the action failed because the [`xpack.actions.email.domain_allowlist`](https://www.elastic.co/guide/en/kibana/current/alert-action-settings-kb.html#action-config-email-domain-allowlist) setting was updated and the action’s email recipient is no longer included in the allowlist:
+If there are rule actions that failed to run successfully, you can see the details on the **History** tab. In the **Message** column, click the warning or expand icon ![double arrow icon to open a flyout with the document details](../../../images/kibana-expand-icon-2.png "") or click the number in the **Errored actions** column to open the **Errored Actions** panel. In this example, the action failed because the [`xpack.actions.email.domain_allowlist`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/alerting-settings.md#action-config-email-domain-allowlist) setting was updated and the action’s email recipient is no longer included in the allowlist:

:::{image} ../../../images/kibana-rule-details-errored-actions.png
:alt: Rule history page with alerts that have errored actions

diff --git a/explore-analyze/alerts-cases/alerts/notifications-domain-allowlist.md b/explore-analyze/alerts-cases/alerts/notifications-domain-allowlist.md
index b5d0672fff..ed9290501b 100644
--- a/explore-analyze/alerts-cases/alerts/notifications-domain-allowlist.md
+++ b/explore-analyze/alerts-cases/alerts/notifications-domain-allowlist.md
@@ -33,7 +33,7 @@ This updates the notifications settings for {{es}} and {{kib}} to reflect what i

### Use the {{ecloud}} Control CLI [use-the-ecloud-control-cli]

-Updating multiple deployments through the UI can take a lot of time. Instead, you can use the [{{ecloud}} Control](https://www.elastic.co/guide/en/ecctl/current/ecctl-overview.html) command-line interface (`ecctl`) to automate the deployment update.
+Updating multiple deployments through the UI can take a lot of time. Instead, you can use the [{{ecloud}} Control](asciidocalypse://docs/ecctl/docs/reference/cloud/ecctl/index.md) command-line interface (`ecctl`) to automate the deployment update.

The following example script shows how to update all deployments of an organization:

diff --git a/explore-analyze/alerts-cases/alerts/rule-action-variables.md b/explore-analyze/alerts-cases/alerts/rule-action-variables.md
index 42010c1637..948ee70090 100644
--- a/explore-analyze/alerts-cases/alerts/rule-action-variables.md
+++ b/explore-analyze/alerts-cases/alerts/rule-action-variables.md
@@ -20,9 +20,9 @@ The available variables differ by rule type, however there are some common varia

Some cases exist where the variable values will be "escaped" when used in a context where escaping is needed. For example:

-* For the [email connector](https://www.elastic.co/guide/en/kibana/current/email-action-type.html), the `message` action configuration property escapes any characters that would be interpreted as Markdown.
-* For the [Slack connector](https://www.elastic.co/guide/en/kibana/current/slack-action-type.html), the `message` action configuration property escapes any characters that would be interpreted as Slack Markdown.
-* For the [Webhook connector](https://www.elastic.co/guide/en/kibana/current/webhook-action-type.html), the `body` action configuration property escapes any characters that are invalid in JSON string values.
+* For the [email connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/email-action-type.md), the `message` action configuration property escapes any characters that would be interpreted as Markdown.
+* For the [Slack connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/slack-action-type.md), the `message` action configuration property escapes any characters that would be interpreted as Slack Markdown.
+* For the [Webhook connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/webhook-action-type.md), the `body` action configuration property escapes any characters that are invalid in JSON string values.

Mustache also supports "triple braces" of the form `{{{variable name}}}`, which indicates no escaping should be done at all.
Use this form with caution, since it could end up rendering the variable content such that the resulting parameter is invalid or formatted incorrectly. diff --git a/explore-analyze/alerts-cases/alerts/rule-type-es-query.md b/explore-analyze/alerts-cases/alerts/rule-type-es-query.md index 95c9a2651a..0b651f4864 100644 --- a/explore-analyze/alerts-cases/alerts/rule-type-es-query.md +++ b/explore-analyze/alerts-cases/alerts/rule-type-es-query.md @@ -52,7 +52,7 @@ When you create an {{es}} query rule, your choice of query type affects the info : Specify how to calculate the value that is compared to the threshold. The value is calculated by aggregating a numeric field within the time window. The aggregation options are: `count`, `average`, `sum`, `min`, and `max`. When using `count` the document count is used and an aggregation field is not necessary. Over or Grouped Over - : Specify whether the aggregation is applied over all documents or split into groups using up to four grouping fields. If you choose to use grouping, it’s a [terms](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) or [multi terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-multi-terms-aggregation.html); an alert will be created for each unique set of values when it meets the condition. To limit the number of alerts on high cardinality fields, you must specify the number of groups to check against the threshold. Only the top groups are checked. + : Specify whether the aggregation is applied over all documents or split into groups using up to four grouping fields. If you choose to use grouping, it’s a [terms](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) or [multi terms aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-multi-terms-aggregation.md); an alert will be created for each unique set of values when it meets the condition. To limit the number of alerts on high cardinality fields, you must specify the number of groups to check against the threshold. Only the top groups are checked. Threshold : Defines a threshold value and a comparison operator (`is above`, `is above or equals`, `is below`, `is below or equals`, or `is between`). The value calculated by the aggregation is compared to this threshold. @@ -150,7 +150,7 @@ The following variables are specific to the {{es}} query rule: {{/context.hits}} ``` - The documents returned by `context.hits` include the [`_source`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html) field. If the {{es}} query search API’s [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html#search-fields-param) parameter is used, documents will also return the `fields` field, which can be used to access any runtime fields defined by the [`runtime_mappings`](../../../manage-data/data-store/mapping/define-runtime-fields-in-search-request.md) parameter. For example: + The documents returned by `context.hits` include the [`_source`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md) field. 
If the {{es}} query search API’s [`fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md#search-fields-param) parameter is used, documents will also return the `fields` field, which can be used to access any runtime fields defined by the [`runtime_mappings`](../../../manage-data/data-store/mapping/define-runtime-fields-in-search-request.md) parameter. For example: ```handlebars {{#context.hits}} diff --git a/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md b/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md index a21835e48a..626f53fbed 100644 --- a/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md +++ b/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md @@ -83,7 +83,7 @@ The following action variables are specific to the index threshold rule. You can ## Example [_example] -In this example, you will use the {{kib}} [sample weblog data set](https://www.elastic.co/guide/en/kibana/current/get-started.html) to set up and tune the conditions on an index threshold rule. For this example, you want to detect when any of the top four sites serve more than 420,000 bytes over a 24 hour period. +In this example, you will use the {{kib}} [sample weblog data set](/explore-analyze/index.md) to set up and tune the conditions on an index threshold rule. For this example, you want to detect when any of the top four sites serve more than 420,000 bytes over a 24 hour period. 1. Go to **{{stack-manage-app}} > {{rules-ui}}** and click **Create rule**. 2. Select the **Index threshold** rule type. diff --git a/explore-analyze/alerts-cases/alerts/rule-types.md b/explore-analyze/alerts-cases/alerts/rule-types.md index 911f803a57..7568d9f4fd 100644 --- a/explore-analyze/alerts-cases/alerts/rule-types.md +++ b/explore-analyze/alerts-cases/alerts/rule-types.md @@ -41,7 +41,7 @@ If you create a rule in the {{observability}} app, its alerts are not visible in ## Security rules [security-rules] -Security rules detect suspicious source events with pre-built or custom rules and create alerts when a rule’s conditions are met. For more information, refer to [Security rules](https://www.elastic.co/guide/en/security/current/prebuilt-rules.html). +Security rules detect suspicious source events with pre-built or custom rules and create alerts when a rule’s conditions are met. For more information, refer to [Security rules](asciidocalypse://docs/docs-content/docs/reference/security/prebuilt-rules.md). ::::{note} Alerts associated with security rules are visible only in the {{security-app}}; they are not visible in **{{stack-manage-app}} > {{rules-ui}}**. diff --git a/explore-analyze/alerts-cases/cases/manage-cases-settings.md b/explore-analyze/alerts-cases/cases/manage-cases-settings.md index af02170741..7d33018afd 100644 --- a/explore-analyze/alerts-cases/cases/manage-cases-settings.md +++ b/explore-analyze/alerts-cases/cases/manage-cases-settings.md @@ -43,7 +43,7 @@ You can create connectors in **{{stack-manage-app}} > {{connectors-ui}}**, as de 1. From the **Incident management system** list, select **Add new connector**. 2. Select an external incident management system. -3. Enter your required settings. 
Refer to [{{ibm-r}}](https://www.elastic.co/guide/en/kibana/current/resilient-action-type.html), [Jira](https://www.elastic.co/guide/en/kibana/current/jira-action-type.html), [{{sn-itsm}}](https://www.elastic.co/guide/en/kibana/current/servicenow-action-type.html), [{{sn-sir}}](https://www.elastic.co/guide/en/kibana/current/servicenow-sir-action-type.html), [Swimlane](https://www.elastic.co/guide/en/kibana/current/swimlane-action-type.html), [{{hive}}](https://www.elastic.co/guide/en/kibana/current/thehive-action-type.html), or [{{webhook-cm}}](https://www.elastic.co/guide/en/kibana/current/cases-webhook-action-type.html) for connector configuration details. +3. Enter your required settings. Refer to [{{ibm-r}}](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/resilient-action-type.md), [Jira](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/jira-action-type.md), [{{sn-itsm}}](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/servicenow-action-type.md), [{{sn-sir}}](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/servicenow-sir-action-type.md), [Swimlane](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/swimlane-action-type.md), [{{hive}}](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/thehive-action-type.md), or [{{webhook-cm}}](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/cases-webhook-action-type.md) for connector configuration details. You can subsequently choose the connector when you create cases and use it in case templates. To change the default connector for new cases, select the connector from the **Incident management system** list. diff --git a/explore-analyze/alerts-cases/cases/manage-cases.md b/explore-analyze/alerts-cases/cases/manage-cases.md index 92769b2c6f..8507f8431d 100644 --- a/explore-analyze/alerts-cases/cases/manage-cases.md +++ b/explore-analyze/alerts-cases/cases/manage-cases.md @@ -31,7 +31,7 @@ Open a new case to keep track of issues and share their details with colleagues. 6. For the **External incident management system**, select a connector. For more information, refer to [External incident management systems](manage-cases-settings.md#case-connectors). 7. After you’ve completed all of the required fields, click **Create case**. -[preview] Alternatively, you can configure your rules to automatically create cases by using [case actions](https://www.elastic.co/guide/en/kibana/current/cases-action-type.html). By default, the rule adds all of the alerts within a specified time window to a single case. You can optionally choose a field to group the alerts and create separate cases for each group. You can also choose whether you want the rule to reopen cases or open new ones when the time window elapses. +[preview] Alternatively, you can configure your rules to automatically create cases by using [case actions](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/cases-action-type.md). By default, the rule adds all of the alerts within a specified time window to a single case. You can optionally choose a field to group the alerts and create separate cases for each group. You can also choose whether you want the rule to reopen cases or open new ones when the time window elapses. ## Add email notifications [add-case-notifications] @@ -47,7 +47,7 @@ For self-managed {{kib}}: 1. Create a preconfigured email connector. ::::{note} - At this time, email notifications support only preconfigured connectors, which are defined in the `kibana.yml` file. 
For examples, refer to [Email connectors](https://www.elastic.co/guide/en/kibana/current/pre-configured-connectors.html#preconfigured-email-configuration) and [Configure email accounts for well-known services](https://www.elastic.co/guide/en/kibana/current/email-action-type.html#configuring-email). + At this time, email notifications support only preconfigured connectors, which are defined in the `kibana.yml` file. For examples, refer to [Email connectors](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/pre-configured-connectors.md#preconfigured-email-configuration) and [Configure email accounts for well-known services](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/email-action-type.md#configuring-email). :::: 2. Set the `notifications.connectors.default.email` {{kib}} setting in kibana.yml to the name of your email connector. diff --git a/explore-analyze/alerts-cases/cases/setup-cases.md b/explore-analyze/alerts-cases/cases/setup-cases.md index 7bef85ffeb..2b7f4374fe 100644 --- a/explore-analyze/alerts-cases/cases/setup-cases.md +++ b/explore-analyze/alerts-cases/cases/setup-cases.md @@ -33,7 +33,7 @@ By default, `All` for the **Cases** feature includes authority to delete cases a ::::{note} Before a user can be assigned to a case, they must log into {{kib}} at least once, which creates a user profile. -This privilege is also required to add [case actions](https://www.elastic.co/guide/en/kibana/current/cases-action-type.html) to rules. +This privilege is also required to add [case actions](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/cases-action-type.md) to rules. :::: diff --git a/explore-analyze/alerts-cases/watcher/actions-email.md b/explore-analyze/alerts-cases/watcher/actions-email.md index 1ec2231595..53d682671a 100644 --- a/explore-analyze/alerts-cases/watcher/actions-email.md +++ b/explore-analyze/alerts-cases/watcher/actions-email.md @@ -282,7 +282,7 @@ bin/elasticsearch-keystore add xpack.notification.email.account.exchange_account The `email` action supports sending messages with an HTML body. However, for security reasons, {{watcher}} [sanitizes](https://en.wikipedia.org/wiki/HTML_sanitization) the HTML. -You can control which HTML features are allowed or disallowed by configuring the `xpack.notification.email.html.sanitization.allow` and `xpack.notification.email.html.sanitization.disallow` settings in `elasticsearch.yml`. You can specify individual HTML elements and [HTML feature groups](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html#html-feature-groups). By default, {{watcher}} allows the following features: `body`, `head`, `_tables`, `_links`, `_blocks`, `_formatting` and `img:embedded`. +You can control which HTML features are allowed or disallowed by configuring the `xpack.notification.email.html.sanitization.allow` and `xpack.notification.email.html.sanitization.disallow` settings in `elasticsearch.yml`. You can specify individual HTML elements and [HTML feature groups](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md#html-feature-groups). By default, {{watcher}} allows the following features: `body`, `head`, `_tables`, `_links`, `_blocks`, `_formatting` and `img:embedded`. For example, the following settings allow the HTML to contain tables and block elements, but disallow `
<h4>`, `<h5>` and `<h6>` tags.

diff --git a/explore-analyze/alerts-cases/watcher/actions-index.md b/explore-analyze/alerts-cases/watcher/actions-index.md
index e85250ef12..5cd0c3592a 100644
--- a/explore-analyze/alerts-cases/watcher/actions-index.md
+++ b/explore-analyze/alerts-cases/watcher/actions-index.md
@@ -43,7 +43,7 @@ The following snippet shows a simple `index` action definition:

| `op_type` | no | `index` | The [op_type](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) for the index operation. Must be one of either `index` or `create`. Must be `create` if `index` is a data stream. |
| `execution_time_field` | no | - | The field that will store/index the watch execution time. |
| `timeout` | no | 60s | The timeout for waiting for the index api call to return. If no response is returned within this time, the index action times out and fails. This setting overrides the default timeouts. |
-| `refresh` | no | - | Optional setting of the [refresh policy](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-refresh.html) for the write request |
+| `refresh` | no | - | Optional setting of the [refresh policy](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/refresh-parameter.md) for the write request |

## Multi-document support [anatomy-actions-index-multi-doc-support]

diff --git a/explore-analyze/alerts-cases/watcher/actions-jira.md b/explore-analyze/alerts-cases/watcher/actions-jira.md
index a48e136ba1..c257a7fc0f 100644
--- a/explore-analyze/alerts-cases/watcher/actions-jira.md
+++ b/explore-analyze/alerts-cases/watcher/actions-jira.md
@@ -108,7 +108,7 @@ xpack.notification.jira:

It is strongly advised to use Basic Authentication with secured HTTPS protocol only.
::::

-You can also specify defaults for the [Jira issues](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html#jira-account-attributes):
+You can also specify defaults for the [Jira issues](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md#jira-account-attributes):

```yaml
xpack.notification.jira:

diff --git a/explore-analyze/alerts-cases/watcher/actions-slack.md b/explore-analyze/alerts-cases/watcher/actions-slack.md
index 7b4d693678..01f090d570 100644
--- a/explore-analyze/alerts-cases/watcher/actions-slack.md
+++ b/explore-analyze/alerts-cases/watcher/actions-slack.md
@@ -153,7 +153,7 @@ You can no longer configure Slack accounts using `elasticsearch.yml` settings. P
::::

-You can specify defaults for the [Slack notification attributes](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html#slack-account-attributes):
+You can specify defaults for the [Slack notification attributes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md#slack-account-attributes):

```yaml
xpack.notification.slack:

diff --git a/explore-analyze/alerts-cases/watcher/actions-webhook.md b/explore-analyze/alerts-cases/watcher/actions-webhook.md
index 2bbaad7df0..072cebd718 100644
--- a/explore-analyze/alerts-cases/watcher/actions-webhook.md
+++ b/explore-analyze/alerts-cases/watcher/actions-webhook.md
@@ -71,7 +71,7 @@ You can use basic authentication when sending a request to a secured webservice.

By default, both the username and the password are stored in the `.watches` index in plain text. When the {{es}} {{security-features}} are enabled, {{watcher}} can encrypt the password before storing it.
:::: -You can also use PKI-based authentication when submitting requests to a cluster that has {{es}} {{security-features}} enabled. When you use PKI-based authentication instead of HTTP basic auth, you don’t need to store any authentication information in the watch itself. To use PKI-based authentication, you [configure the SSL key settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html#ssl-notification-settings) for {{watcher}} in `elasticsearch.yml`. +You can also use PKI-based authentication when submitting requests to a cluster that has {{es}} {{security-features}} enabled. When you use PKI-based authentication instead of HTTP basic auth, you don’t need to store any authentication information in the watch itself. To use PKI-based authentication, you [configure the SSL key settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md#ssl-notification-settings) for {{watcher}} in `elasticsearch.yml`. ## Query parameters [webhook-query-parameters] diff --git a/explore-analyze/alerts-cases/watcher/enable-watcher.md b/explore-analyze/alerts-cases/watcher/enable-watcher.md index 71289d1282..94fba8b683 100644 --- a/explore-analyze/alerts-cases/watcher/enable-watcher.md +++ b/explore-analyze/alerts-cases/watcher/enable-watcher.md @@ -128,7 +128,7 @@ PUT _watcher/watch/test-alarm ## Configuring a custom mail server [watcher-custom-mail-server] -It is possible to use a custom mail service instead of the one configured by default. It can be configured by following the [Elasticsearch documentation for configuring email accounts](https://www.elastic.co/guide/en/elasticsearch/reference/current/actions-email.html). +It is possible to use a custom mail service instead of the one configured by default. It can be configured by following the [Elasticsearch documentation for configuring email accounts](/explore-analyze/alerts-cases/watcher/actions-email.md). An example on how to configure a new account from the Elastic cloud console: @@ -158,4 +158,4 @@ An example on how to configure a new account from the Elastic cloud console: 6. The new email account is now set up. It will now be used by default for watcher email actions. -For a full reference of all available settings, see the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html#email-notification-settings). +For a full reference of all available settings, see the [Elasticsearch documentation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md#email-notification-settings). diff --git a/explore-analyze/alerts-cases/watcher/encrypting-data.md b/explore-analyze/alerts-cases/watcher/encrypting-data.md index 7c32b8cd0a..488ceb87f3 100644 --- a/explore-analyze/alerts-cases/watcher/encrypting-data.md +++ b/explore-analyze/alerts-cases/watcher/encrypting-data.md @@ -14,13 +14,13 @@ Every `password` field that is used in your watch within an HTTP basic authentic To encrypt sensitive data in {{watcher}}: -1. Use the [elasticsearch-syskeygen](https://www.elastic.co/guide/en/elasticsearch/reference/current/syskeygen.html) command to create a system key file. +1. Use the [elasticsearch-syskeygen](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/syskeygen.md) command to create a system key file. 2. Copy the `system_key` file to all of the nodes in your cluster. 
::::{important}
The system key is a symmetric key, so the same key must be used on every node in the cluster.
::::

-3. Set the [`xpack.watcher.encrypt_sensitive_data` setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html):
+3. Set the [`xpack.watcher.encrypt_sensitive_data` setting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md):

```sh
xpack.watcher.encrypt_sensitive_data: true

diff --git a/explore-analyze/alerts-cases/watcher/input-search.md b/explore-analyze/alerts-cases/watcher/input-search.md
index 2728985051..86ab3a3b92 100644
--- a/explore-analyze/alerts-cases/watcher/input-search.md
+++ b/explore-analyze/alerts-cases/watcher/input-search.md
@@ -141,7 +141,7 @@ The total number of hits in the search response is returned as an object in the

| `request.indices` | no | - | The indices to search. If omitted, all indices are searched, which is the default behaviour in Elasticsearch. |
| `request.body` | no | - | The body of the request. The [request body](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) follows the same structure you normally send in the body of a REST `_search` request. The body can be static text or include `mustache` [templates](how-watcher-works.md#templates). |
| `request.template` | no | - | The body of the search template. See [configure templates](how-watcher-works.md#templates) for more information. |
-| `request.indices_options.expand_wildcards` | no | `open` | How to expand wildcards. Valid values are: `all`, `open`, `closed`, and `none` See [`expand_wildcards`](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index) for more information. |
+| `request.indices_options.expand_wildcards` | no | `open` | How to expand wildcards. Valid values are: `all`, `open`, `closed`, and `none`. See [`expand_wildcards`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index) for more information. |
| `request.indices_options.ignore_unavailable` | no | `true` | Whether the search should ignore unavailable indices. See [`ignore_unavailable`](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index) for more information. |
| `request.indices_options.allow_no_indices` | no | `true` | Whether to allow a search where a wildcard indices expression results in no concrete indices. See [allow_no_indices](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index) for more information. |
| `extract` | no | - | An array of JSON keys to extract from the search response and load as the payload. When a search generates a large response, you can use `extract` to select the relevant fields instead of loading the entire response. |

diff --git a/explore-analyze/alerts-cases/watcher/transform-search.md b/explore-analyze/alerts-cases/watcher/transform-search.md
index c67fa35fdf..b347edebcd 100644
--- a/explore-analyze/alerts-cases/watcher/transform-search.md
+++ b/explore-analyze/alerts-cases/watcher/transform-search.md
@@ -52,7 +52,7 @@ The following table lists all available settings for the search {{watcher-transf

| `request.search_type` | no | query_then_fetch | The search [type](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search). |
| `request.indices` | no | all indices | One or more indices to search on.
| | `request.body` | no | `match_all` query | The body of the request. The [request body](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) follows the same structure you normally send in the body of a REST `_search` request. The body can be static text or include `mustache` [templates](how-watcher-works.md#templates). | -| `request.indices_options.expand_wildcards` | no | `open` | Determines how to expand indices wildcards. An array consisting of a combination of `open`, `closed`, and `hidden`. Alternatively a value of `none` or `all`. (see [multi-target syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index)) | +| `request.indices_options.expand_wildcards` | no | `open` | Determines how to expand indices wildcards. An array consisting of a combination of `open`, `closed`, and `hidden`. Alternatively a value of `none` or `all`. (see [multi-target syntax](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index)) | | `request.indices_options.ignore_unavailable` | no | `true` | A boolean value that determines whether the search should leniently ignore unavailable indices (see [multi-target syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index)) | | `request.indices_options.allow_no_indices` | no | `true` | A boolean value that determines whether the search should leniently return no results when no indices are resolved (see [multi-target syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index)) | | `request.template` | no | - | The body of the search template. See [configure templates](how-watcher-works.md#templates) for more information. | diff --git a/explore-analyze/alerts-cases/watcher/watch-cluster-status.md b/explore-analyze/alerts-cases/watcher/watch-cluster-status.md index 9a08061f64..461cf3bbac 100644 --- a/explore-analyze/alerts-cases/watcher/watch-cluster-status.md +++ b/explore-analyze/alerts-cases/watcher/watch-cluster-status.md @@ -85,7 +85,7 @@ PUT _watcher/watch/cluster_health_watch It would be a good idea to create a user with the minimum privileges required for use with such a watch configuration. -Depending on how your cluster is configured, there may be additional settings required before the watch can access your cluster such as keystores, truststores, or certificates. For more information, see [{{watcher}} settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html). +Depending on how your cluster is configured, there may be additional settings required before the watch can access your cluster such as keystores, truststores, or certificates. For more information, see [{{watcher}} settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md). If you check the watch history, you’ll see that the cluster status is recorded as part of the `watch_record` each time the watch executes. diff --git a/explore-analyze/alerts-cases/watcher/watcher-ui.md b/explore-analyze/alerts-cases/watcher/watcher-ui.md index a66d440114..815a2ab274 100644 --- a/explore-analyze/alerts-cases/watcher/watcher-ui.md +++ b/explore-analyze/alerts-cases/watcher/watcher-ui.md @@ -32,7 +32,7 @@ If you are creating a threshold watch, you must also have the `view_index_metada A threshold alert is one of the most common types of watches that you can create. 
This alert periodically checks whether your data is above, below, equal to, or in between a certain threshold within a given time interval.

-The following example walks you through creating a threshold alert. The alert is triggered when the maximum total CPU usage on a machine goes above a certain percentage. The example uses [Metricbeat](https://www.elastic.co/products/beats/metricbeat) to collect metrics from your systems and services. [Learn more](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-installation-configuration.html) on how to install and get started with Metricbeat.
+The following example walks you through creating a threshold alert. The alert is triggered when the maximum total CPU usage on a machine goes above a certain percentage. The example uses [Metricbeat](https://www.elastic.co/products/beats/metricbeat) to collect metrics from your systems and services. [Learn more](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-installation-configuration.md) on how to install and get started with Metricbeat.

### Define the watch input and schedule [_define_the_watch_input_and_schedule]

@@ -129,7 +129,7 @@ On the Watch overview page, click **Create** and choose **Create advanced watch*

The **Simulate** tab allows you to override parts of the watch, and then run a simulation. Be aware of these implementation details on overrides:

-* Trigger overrides use [date math](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#date-math).
+* Trigger overrides use [date math](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/common-options.md#date-math).
* Input overrides accepts a JSON blob.
* Condition overrides indicates if you want to force the condition to always be `true`.
* Action overrides support [multiple options](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch).

diff --git a/explore-analyze/dashboards/create-dashboard-of-panels-with-ecommerce-data.md b/explore-analyze/dashboards/create-dashboard-of-panels-with-ecommerce-data.md
index 14fb1cbed5..22675d5b03 100644
--- a/explore-analyze/dashboards/create-dashboard-of-panels-with-ecommerce-data.md
+++ b/explore-analyze/dashboards/create-dashboard-of-panels-with-ecommerce-data.md
@@ -37,7 +37,7 @@ Open the visualization editor, then make sure the correct fields appear.

## Create visualizations with custom time intervals [custom-time-interval]

-When you create visualizations with time series data, you can use the default time interval or increase and decrease the interval. For performance reasons, the visualization editor allows you to choose the minimum time interval, but not the exact time interval. The interval limit is controlled by the [`histogram:maxBars`](https://www.elastic.co/guide/en/kibana/current/advanced-options.html#histogram-maxbars) setting and [time range](../query-filter/filtering.md).
+When you create visualizations with time series data, you can use the default time interval or increase and decrease the interval. For performance reasons, the visualization editor allows you to choose the minimum time interval, but not the exact time interval. The interval limit is controlled by the [`histogram:maxBars`](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md#histogram-maxbars) setting and [time range](../query-filter/filtering.md).
To analyze the data with a custom time interval, create a bar chart that shows you how many orders were made at your store every hour: diff --git a/explore-analyze/dashboards/create-dashboard-of-panels-with-web-server-data.md b/explore-analyze/dashboards/create-dashboard-of-panels-with-web-server-data.md index c05439e3c8..7dd8803bc1 100644 --- a/explore-analyze/dashboards/create-dashboard-of-panels-with-web-server-data.md +++ b/explore-analyze/dashboards/create-dashboard-of-panels-with-web-server-data.md @@ -118,7 +118,7 @@ To increase the minimum time interval: 1. In the layer pane, click **timestamp**. 2. Change the **Minimum interval** to **1d**, then click **Close**. - You can increase and decrease the minimum interval, but you are unable to decrease the interval below the configured [**Advanced Settings**](https://www.elastic.co/guide/en/kibana/current/advanced-options.html). + You can increase and decrease the minimum interval, but you are unable to decrease the interval below the configured [**Advanced Settings**](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md). To save space on the dashboard, hide the axis labels. diff --git a/explore-analyze/dashboards/drilldowns.md b/explore-analyze/dashboards/drilldowns.md index b5d5c10ed0..764cd1e3ca 100644 --- a/explore-analyze/dashboards/drilldowns.md +++ b/explore-analyze/dashboards/drilldowns.md @@ -79,7 +79,7 @@ Create a drilldown that opens the **Detailed logs** dashboard from the **[Logs] ## Create URL drilldowns [create-url-drilldowns] -URL drilldowns enable you to navigate from a dashboard to external websites. Destination URLs can be dynamic, depending on the dashboard context or user interaction with a panel. To create URL drilldowns, you add [variables](https://www.elastic.co/guide/en/kibana/current/drilldowns.html) to a URL template, which configures the behavior of the drilldown. All panels that you create with the visualization editors support dashboard drilldowns. +URL drilldowns enable you to navigate from a dashboard to external websites. Destination URLs can be dynamic, depending on the dashboard context or user interaction with a panel. To create URL drilldowns, you add [variables](/explore-analyze/dashboards/drilldowns.md) to a URL template, which configures the behavior of the drilldown. All panels that you create with the visualization editors support dashboard drilldowns. ![Drilldown on pie chart that navigates to Github](../../images/kibana-dashboard_urlDrilldownGoToGitHub_8.3.gif "") diff --git a/explore-analyze/discover/discover-get-started.md b/explore-analyze/discover/discover-get-started.md index ebc0d803a1..d141f88989 100644 --- a/explore-analyze/discover/discover-get-started.md +++ b/explore-analyze/discover/discover-get-started.md @@ -283,5 +283,5 @@ This section references common questions and issues encountered when using Disco This can happen in several cases: -* With runtime fields and regular keyword fields, when the string exceeds the value set for the [ignore_above](https://www.elastic.co/guide/en/elasticsearch/reference/current/ignore-above.html) setting used when indexing the data into {{es}}. +* With runtime fields and regular keyword fields, when the string exceeds the value set for the [ignore_above](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/ignore-above.md) setting used when indexing the data into {{es}}. * Due to the structure of nested fields, a leaf field added to the table as a column will not contain values in any of its cells. 
Instead, add the root field as a column to view a JSON representation of its values. Learn more in [this blog post](https://www.elastic.co/de/blog/discover-uses-fields-api-in-7-12). diff --git a/explore-analyze/discover/document-explorer.md b/explore-analyze/discover/document-explorer.md index b12de18dac..4da403fd96 100644 --- a/explore-analyze/discover/document-explorer.md +++ b/explore-analyze/discover/document-explorer.md @@ -53,7 +53,7 @@ You can define different settings for the header row and body rows. ### Limit the sample size [document-explorer-sample-size] -When the number of results returned by your search query (displayed at the top of the **Documents** or **Results** tab) is greater than the value of [`discover:sampleSize`](https://www.elastic.co/guide/en/kibana/current/advanced-options.html#kibana-discover-settings), the number of results displayed in the table is limited to the configured value by default. You can adjust the initial sample size for searches to any number between 10 and `discover:sampleSize` from the **Display options** located in the table toolbar. +When the number of results returned by your search query (displayed at the top of the **Documents** or **Results** tab) is greater than the value of [`discover:sampleSize`](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md#kibana-discover-settings), the number of results displayed in the table is limited to the configured value by default. You can adjust the initial sample size for searches to any number between 10 and `discover:sampleSize` from the **Display options** located in the table toolbar. On the last page of the table, a message indicates that you’ve reached the end of the loaded search results. From that message, you can choose to load more results to continue exploring. diff --git a/explore-analyze/discover/search-sessions.md b/explore-analyze/discover/search-sessions.md index 2c398f7277..3b84bc19d9 100644 --- a/explore-analyze/discover/search-sessions.md +++ b/explore-analyze/discover/search-sessions.md @@ -17,7 +17,7 @@ Search Sessions are deprecated and will be removed in a future version. Sometimes you might need to search through large amounts of data, no matter how long the search takes. Consider a threat hunting scenario, where you need to search through years of data. You can save a long-running search, so {{kib}} processes your request in the background, and you can continue your work. -Save your search session from **Discover** or **Dashboard**, and when your session is complete, view and manage it in **Stack Management**. Search sessions are [enabled by default](https://www.elastic.co/guide/en/kibana/current/search-session-settings-kb.html). +Save your search session from **Discover** or **Dashboard**, and when your session is complete, view and manage it in **Stack Management**. Search sessions are [enabled by default](asciidocalypse://docs/kibana/docs/reference/configuration-reference/search-sessions-settings.md). :::{image} ../../images/kibana-search-session.png :alt: Search Session indicator displaying the current state of the search diff --git a/explore-analyze/find-and-organize/data-views.md b/explore-analyze/find-and-organize/data-views.md index d951b9b320..4b35e2a468 100644 --- a/explore-analyze/find-and-organize/data-views.md +++ b/explore-analyze/find-and-organize/data-views.md @@ -476,7 +476,7 @@ Built-in validation is unsupported for scripted fields. When your scripts contai 5. Select **Set format**, then enter the **Format** for the field. 
::::{note}
-For numeric fields the default field formatters are based on the `meta.unit` field. The unit is associated with a [time unit](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units), percent, or byte. The convention for percents is to use value 1 to mean 100%.
+For numeric fields, the default field formatters are based on the `meta.unit` field. The unit is associated with a [time unit](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units), percent, or byte. The convention for percents is to use value 1 to mean 100%.
::::

@@ -613,7 +613,7 @@ Numeric fields support **Bytes**, **Color**, **Duration**, **Histogram**, **Numb
The **Bytes**, **Number**, and **Percentage** formatters enable you to choose the display formats of numbers in the field using the [Elastic numeral pattern](../../explore-analyze/numeral-formatting.md) syntax that {{kib}} maintains.

-The **Histogram** formatter is used only for the [histogram field type](https://www.elastic.co/guide/en/elasticsearch/reference/current/histogram.html). When you use the **Histogram** formatter, you can apply the **Bytes**, **Number**, or **Percentage** format to aggregated data.
+The **Histogram** formatter is used only for the [histogram field type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/histogram.md). When you use the **Histogram** formatter, you can apply the **Bytes**, **Number**, or **Percentage** format to aggregated data.

You can specify the following types to the `Url` field formatter:

diff --git a/explore-analyze/find-and-organize/saved-objects.md b/explore-analyze/find-and-organize/saved-objects.md
index 06d40c4cd1..3d86cdcc24 100644
--- a/explore-analyze/find-and-organize/saved-objects.md
+++ b/explore-analyze/find-and-organize/saved-objects.md
@@ -149,7 +149,7 @@ After you upgrade, or if you set up a new {{kib}} instance using version 8.x or
#### Accessing saved objects using old URLs [saved-object-ids-impact-when-using-legacy-urls]

-When you upgrade {{kib}} and saved object IDs change, the "deep link" URLs to access those saved objects will also change. To reduce the impact, each existing URL is preserved with a special [legacy URL alias](https://www.elastic.co/guide/en/kibana/current/legacy-url-aliases.html). This means that if you use a bookmark for a saved object ID that was changed, you’ll be redirected to the new URL for that saved object.
+When you upgrade {{kib}} and saved object IDs change, the "deep link" URLs to access those saved objects will also change. To reduce the impact, each existing URL is preserved with a special [legacy URL alias](asciidocalypse://docs/kibana/docs/extend/contribute-to-kibana/legacy-url-aliases.md). This means that if you use a bookmark for a saved object ID that was changed, you’ll be redirected to the new URL for that saved object.

#### Importing and copying saved objects [saved-object-ids-impact-when-using-import-and-copy]

diff --git a/explore-analyze/geospatial-analysis.md b/explore-analyze/geospatial-analysis.md
index 8c4bfbc745..08cdf3a767 100644
--- a/explore-analyze/geospatial-analysis.md
+++ b/explore-analyze/geospatial-analysis.md
@@ -15,7 +15,7 @@ Not sure where to get started with {{es}} and geo? Then, you have come to the ri
## Geospatial mapping [geospatial-mapping]

-{{es}} supports two types of geo data: [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) fields which support lat/lon pairs, and [geo_shape](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html) fields, which support points, lines, circles, polygons, multi-polygons, and so on. Use [explicit mapping](../manage-data/data-store/mapping/explicit-mapping.md) to index geo data fields.
+{{es}} supports two types of geo data: [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) fields which support lat/lon pairs, and [geo_shape](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md) fields, which support points, lines, circles, polygons, multi-polygons, and so on. Use [explicit mapping](../manage-data/data-store/mapping/explicit-mapping.md) to index geo data fields.

Have an index with lat/lon pairs but no geo_point mapping? Use [runtime fields](../manage-data/data-store/mapping/map-runtime-field.md) to make a geo_point field without reindexing.

@@ -24,20 +24,20 @@ Have an index with lat/lon pairs but no geo_point mapping? Use [runtime fields](
Data is often messy and incomplete. [Ingest pipelines](../manage-data/ingest/transform-enrich/ingest-pipelines.md) lets you clean, transform, and augment your data before indexing.

-* Use [CSV](https://www.elastic.co/guide/en/elasticsearch/reference/current/csv-processor.html) together with [explicit mapping](../manage-data/data-store/mapping/explicit-mapping.md) to index CSV files with geo data. Kibana’s [Import CSV](visualize/maps/import-geospatial-data.md) feature can help with this.
-* Use [GeoIP](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html) to add geographical location of an IPv4 or IPv6 address.
-* Use [geo-grid processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest-geo-grid-processor.html) to convert grid tiles or hexagonal cell ids to bounding boxes or polygons which describe their shape.
+* Use [CSV](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/csv-processor.md) together with [explicit mapping](../manage-data/data-store/mapping/explicit-mapping.md) to index CSV files with geo data. Kibana’s [Import CSV](visualize/maps/import-geospatial-data.md) feature can help with this.
+* Use [GeoIP](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/geoip-processor.md) to add geographical location of an IPv4 or IPv6 address.
+* Use [geo-grid processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/ingest-geo-grid-processor.md) to convert grid tiles or hexagonal cell ids to bounding boxes or polygons which describe their shape.
* Use [geo_match enrich policy](../manage-data/ingest/transform-enrich/example-enrich-data-based-on-geolocation.md) for reverse geocoding. For example, use [reverse geocoding](visualize/maps/reverse-geocoding-tutorial.md) to visualize metropolitan areas by web traffic.

## Query [geospatial-query]
For example, "Show me all subscribers that live within 5 miles of our new gym location, that joined in the last year and have running mentioned in their profile". +[Geo queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/geo-queries.md) answer location-driven questions. Find documents that intersect with, are within, are contained by, or do not intersect your query geometry. Combine geospatial queries with full text search queries for unparalleled searching experience. For example, "Show me all subscribers that live within 5 miles of our new gym location, that joined in the last year and have running mentioned in their profile". ## ES|QL [esql-query] -[ES|QL](query-filter/languages/esql.md) has support for [Geospatial Search](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-spatial-functions) functions, enabling efficient index searching for documents that intersect with, are within, are contained by, or are disjoint from a query geometry. In addition, the `ST_DISTANCE` function calculates the distance between two points. +[ES|QL](query-filter/languages/esql.md) has support for [Geospatial Search](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-functions-operators.md#esql-spatial-functions) functions, enabling efficient index searching for documents that intersect with, are within, are contained by, or are disjoint from a query geometry. In addition, the `ST_DISTANCE` function calculates the distance between two points. * [`ST_INTERSECTS`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-st_intersects) * [`ST_DISJOINT`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-st_disjoint) @@ -48,22 +48,22 @@ Data is often messy and incomplete. [Ingest pipelines](../manage-data/ingest/tra ## Aggregate [geospatial-aggregate] -[Aggregations](query-filter/aggregations.md) summarizes your data as metrics, statistics, or other analytics. Use [bucket aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html) to group documents into buckets, also called bins, based on field values, ranges, or other criteria. Then, use [metric aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) to calculate metrics, such as a sum or average, from field values in each bucket. Compare metrics across buckets to gain insights from your data. +[Aggregations](query-filter/aggregations.md) summarizes your data as metrics, statistics, or other analytics. Use [bucket aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/bucket.md) to group documents into buckets, also called bins, based on field values, ranges, or other criteria. Then, use [metric aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/metrics.md) to calculate metrics, such as a sum or average, from field values in each bucket. Compare metrics across buckets to gain insights from your data. 
@@ -48,22 +48,22 @@ Data is often messy and incomplete. [Ingest pipelines](../manage-data/ingest/tra
## Aggregate [geospatial-aggregate]

-[Aggregations](query-filter/aggregations.md) summarizes your data as metrics, statistics, or other analytics. Use [bucket aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html) to group documents into buckets, also called bins, based on field values, ranges, or other criteria. Then, use [metric aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) to calculate metrics, such as a sum or average, from field values in each bucket. Compare metrics across buckets to gain insights from your data.
+[Aggregations](query-filter/aggregations.md) summarize your data as metrics, statistics, or other analytics. Use [bucket aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/bucket.md) to group documents into buckets, also called bins, based on field values, ranges, or other criteria. Then, use [metric aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/metrics.md) to calculate metrics, such as a sum or average, from field values in each bucket. Compare metrics across buckets to gain insights from your data.

Geospatial bucket aggregations:

-* [Geo-distance aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html) evaluates the distance of each geo_point location from an origin point and determines the buckets it belongs to based on the ranges (a document belongs to a bucket if the distance between the document and the origin falls within the distance range of the bucket).
-* [Geohash grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html) groups geo_point and geo_shape values into buckets that represent a grid.
-* [Geohex grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohexgrid-aggregation.html) groups geo_point and geo_shape values into buckets that represent an H3 hexagonal cell.
-* [Geotile grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geotilegrid-aggregation.html) groups geo_point and geo_shape values into buckets that represent a grid. Each cell corresponds to a [map tile](https://en.wikipedia.org/wiki/Tiled_web_map) as used by many online map sites.
+* [Geo-distance aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geodistance-aggregation.md) evaluates the distance of each geo_point location from an origin point and determines the buckets it belongs to based on the ranges (a document belongs to a bucket if the distance between the document and the origin falls within the distance range of the bucket).
+* [Geohash grid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geohashgrid-aggregation.md) groups geo_point and geo_shape values into buckets that represent a grid.
+* [Geohex grid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geohexgrid-aggregation.md) groups geo_point and geo_shape values into buckets that represent an H3 hexagonal cell.
+* [Geotile grid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geotilegrid-aggregation.md) groups geo_point and geo_shape values into buckets that represent a grid. Each cell corresponds to a [map tile](https://en.wikipedia.org/wiki/Tiled_web_map) as used by many online map sites.

Geospatial metric aggregations:

-* [Geo-bounds aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html) computes the geographic bounding box containing all values for a Geopoint or Geoshape field.
-* [Geo-centroid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geocentroid-aggregation.html) computes the weighted centroid from all coordinate values for geo fields.
-* [Geo-line aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geo-line.html) aggregates all geo_point values within a bucket into a LineString ordered by the chosen sort field. Use geo_line aggregation to create [vehicle tracks](visualize/maps/asset-tracking-tutorial.md).
+* [Geo-bounds aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-geobounds-aggregation.md) computes the geographic bounding box containing all values for a Geopoint or Geoshape field.
+* [Geo-centroid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-geocentroid-aggregation.md) computes the weighted centroid from all coordinate values for geo fields.
+* [Geo-line aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-geo-line.md) aggregates all geo_point values within a bucket into a LineString ordered by the chosen sort field. Use geo_line aggregation to create [vehicle tracks](visualize/maps/asset-tracking-tutorial.md).

-Combine aggregations to perform complex geospatial analysis. For example, to calculate the most recent GPS tracks per flight, use a [terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) to group documents into buckets per aircraft. Then use geo-line aggregation to compute a track for each aircraft. In another example, use geotile grid aggregation to group documents into a grid. Then use geo-centroid aggregation to find the weighted centroid of each grid cell.
+Combine aggregations to perform complex geospatial analysis. For example, to calculate the most recent GPS tracks per flight, use a [terms aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) to group documents into buckets per aircraft. Then use geo-line aggregation to compute a track for each aircraft. In another example, use geotile grid aggregation to group documents into a grid. Then use geo-centroid aggregation to find the weighted centroid of each grid cell.

## Integrate [geospatial-integrate]
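To make the mapping, query, and aggregation pieces above concrete, here is a minimal sketch under invented names (a `museums` index with a `geo_point` field, arbitrary coordinates): an explicit `geo_point` mapping, a `geo_distance` query, and a `geotile_grid` bucket aggregation with a `geo_centroid` sub-aggregation:

```console
PUT /museums
{
  "mappings": {
    "properties": {
      "location": { "type": "geo_point" }
    }
  }
}

GET /museums/_search
{
  "query": {
    "geo_distance": {
      "distance": "5mi",
      "location": { "lat": 40.75, "lon": -73.98 }
    }
  },
  "aggs": {
    "grid": {
      "geotile_grid": { "field": "location", "precision": 8 },
      "aggs": {
        "centroid": { "geo_centroid": { "field": "location" } }
      }
    }
  }
}
```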
diff --git a/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md b/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md
index 1dcdbda6dc..942454ff36 100644
--- a/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md
+++ b/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md
@@ -114,7 +114,7 @@ When working with large model sizes, consider how frequently you want to create
Also consider how long you wish to retain snapshots using `model_snapshot_retention_days` and `daily_model_snapshot_retention_after_days`. Retaining fewer snapshots substantially reduces index storage requirements for model state, but also reduces the granularity of model snapshots from which you can revert.

-For more information, refer to [Model snapshots](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-model-snapshots).
+For more information, refer to [Model snapshots](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-model-snapshots).

## 12. Optimize your search queries [search-queries]
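A sketch of the two retention settings discussed in this hunk, applied through the update job API (the job id is hypothetical):

```console
POST _ml/anomaly_detectors/my_job/_update
{
  "model_snapshot_retention_days": 5,
  "daily_model_snapshot_retention_after_days": 1
}
```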
diff --git a/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md b/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md
index 68ca2bd8df..e116e930c3 100644
--- a/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md
+++ b/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md
@@ -15,9 +15,9 @@ If your data includes geographic fields, you can use {{ml-features}} to detect a
To run this type of {{anomaly-job}}, you must have [{{ml-features}} set up](../setting-up-machine-learning.md). You must also have time series data that contains spatial data types. In particular, you must have:

* two comma-separated numbers of the form `latitude,longitude`,
-* a [`geo_point`](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) field,
-* a [`geo_shape`](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html) field that contains point values, or
-* a [`geo_centroid`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geocentroid-aggregation.html) aggregation
+* a [`geo_point`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) field,
+* a [`geo_shape`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md) field that contains point values, or
+* a [`geo_centroid`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-geocentroid-aggregation.md) aggregation

The latitude and longitude must be in the range -180 to 180 and represent a point on the surface of the Earth.

@@ -37,9 +37,9 @@ To get the best results from {{ml}} analytics, you must understand your data. Yo
There are a few limitations to consider before you create this type of job:

1. You cannot create forecasts for {{anomaly-jobs}} that contain geographic functions.
-2. You cannot add [custom rules with conditions](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-rules) to detectors that use geographic functions.
+2. You cannot add [custom rules with conditions](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-rules) to detectors that use geographic functions.

-If those limitations are acceptable, try creating an {{anomaly-job}} that uses the [`lat_long` function](https://www.elastic.co/guide/en/machine-learning/current/ml-geo-functions.html#ml-lat-long) to analyze your own data or the sample data sets.
+If those limitations are acceptable, try creating an {{anomaly-job}} that uses the [`lat_long` function](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-geo-functions.md#ml-lat-long) to analyze your own data or the sample data sets.

To create an {{anomaly-job}} that uses the `lat_long` function, in {{kib}} you must click **Create job** on the **{{ml-cap}} > {{anomaly-detect-cap}} > Jobs** page and select the advanced job wizard. Alternatively, use the [create {{anomaly-jobs}} API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job).

@@ -204,7 +204,7 @@ You can also view the anomaly in **Maps** by clicking **View in Maps** in the ac
When you try this type of {{anomaly-job}} with your own data, it might take some experimentation to find the best combination of buckets, detectors, and influencers to detect the type of behavior you’re seeking.

-For more information about {{anomaly-detect}} concepts, see [Concepts](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-overview.html). For the full list of functions that you can use in {{anomaly-jobs}}, see [*Function reference*](ml-functions.md). For more {{anomaly-detect}} examples, see [Examples](https://www.elastic.co/guide/en/machine-learning/current/anomaly-how-tos.html).
+For more information about {{anomaly-detect}} concepts, see [Concepts](/explore-analyze/machine-learning/anomaly-detection.md). For the full list of functions that you can use in {{anomaly-jobs}}, see [*Function reference*](ml-functions.md). For more {{anomaly-detect}} examples, see [Examples](/explore-analyze/machine-learning/anomaly-detection/anomaly-how-tos.md).

## Add anomaly layers to your maps [geographic-anomalies-map-layer]
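A minimal sketch of an {{anomaly-job}} that uses the `lat_long` function described above; the job id, bucket span, and field names are illustrative, and the `coords` field is assumed to hold `"latitude,longitude"` strings or a `geo_point`:

```console
PUT _ml/anomaly_detectors/geo_test
{
  "analysis_config": {
    "bucket_span": "15m",
    "detectors": [
      {
        "function": "lat_long",
        "field_name": "coords",
        "by_field_name": "user"
      }
    ]
  },
  "data_description": { "time_field": "@timestamp" }
}
```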
diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md b/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md
index b4dd2e507a..273935aa3c 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md
@@ -45,7 +45,7 @@ The {{ml-features}} use the concept of a *bucket* to divide the time series into
The *bucket span* is part of the configuration information for an {{anomaly-job}}. It defines the time interval that is used to summarize and model the data. This is typically between 5 minutes to 1 hour and it depends on your data characteristics. When you set the bucket span, take into account the granularity at which you want to analyze, the frequency of the input data, the typical duration of the anomalies, and the frequency at which alerting is required.

-The bucket span must contain a valid [time interval](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units). When you create an {{anomaly-job}} in {{kib}}, you can choose to estimate a bucket span value based on your data characteristics. If you choose a value that is larger than one day or is significantly different than the estimated value, you receive an informational message.
+The bucket span must contain a valid [time interval](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units). When you create an {{anomaly-job}} in {{kib}}, you can choose to estimate a bucket span value based on your data characteristics. If you choose a value that is larger than one day or is significantly different than the estimated value, you receive an informational message.

### Detectors [ml-ad-detectors]

@@ -118,7 +118,7 @@ For each {{anomaly-job}}, you can optionally specify a dedicated index to store
If you create {{anomaly-jobs}} in {{kib}}, you *must* use {{dfeeds}} to retrieve data from {{es}} for analysis. When you create an {{anomaly-job}}, you select a {{data-source}} and {{kib}} configures the {{dfeed}} for you under the covers.

-You can associate only one {{dfeed}} with each {{anomaly-job}}. The {{dfeed}} contains a query that runs at a defined interval (`frequency`). By default, this interval is calculated relative to the [bucket span](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-create-job) of the {{anomaly-job}}. If you are concerned about delayed data, you can add a delay before the query runs at each interval. See [Handling delayed data](ml-delayed-data-detection.md).
+You can associate only one {{dfeed}} with each {{anomaly-job}}. The {{dfeed}} contains a query that runs at a defined interval (`frequency`). By default, this interval is calculated relative to the [bucket span](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-create-job) of the {{anomaly-job}}. If you are concerned about delayed data, you can add a delay before the query runs at each interval. See [Handling delayed data](ml-delayed-data-detection.md).

{{dfeeds-cap}} can also aggregate data before sending it to the {{anomaly-job}}. There are some limitations, however, and aggregations should generally be used only for low cardinality data. See [Aggregating data for faster performance](ml-configuring-aggregation.md).

@@ -159,7 +159,7 @@ If you want to add multiple scheduled events at once, you can import an iCalenda
* You must identify scheduled events before your {{anomaly-job}} analyzes the data for that time period. Machine learning results are not updated retroactively.
* If your iCalendar file contains recurring events, only the first occurrence is imported.
-* [Bucket results](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-view-results.html#ml-ad-bucket-results) are generated during scheduled events but they have an anomaly score of zero.
+* [Bucket results](/explore-analyze/machine-learning/anomaly-detection/ml-ad-view-results.md#ml-ad-bucket-results) are generated during scheduled events but they have an anomaly score of zero.
* If you use long or frequent scheduled events, it might take longer for the {{ml}} analytics to learn to model your data and some anomalous behavior might be missed.

::::

diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-aggregation.md b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-aggregation.md
index 7610aaa8ba..f31a66b618 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-aggregation.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-aggregation.md
@@ -34,13 +34,13 @@ There are a number of requirements for using aggregations in {{dfeeds}}.
* If your [{{dfeed}} uses aggregations with nested `terms` aggs](#aggs-dfeeds) and model plot is not enabled for the {{anomaly-job}}, neither the **Single Metric Viewer** nor the **Anomaly Explorer** can plot and display an anomaly chart. In these cases, an explanatory message is shown instead of the chart.
* Your {{dfeed}} can contain multiple aggregations, but only the ones with names that match values in the job configuration are fed to the job.
-* Using [scripted metric](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html) aggregations is not supported in {{dfeeds}}.
+* Using [scripted metric](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) aggregations is not supported in {{dfeeds}}.

## Recommendations [aggs-recommendations-dfeeds]

-* When your detectors use [metric](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html) or [sum](https://www.elastic.co/guide/en/machine-learning/current/ml-sum-functions.html) analytical functions, it’s recommended to set the `date_histogram` or `composite` aggregation interval to a tenth of the bucket span. This creates finer, more granular time buckets, which are ideal for this type of analysis.
-* When your detectors use [count](https://www.elastic.co/guide/en/machine-learning/current/ml-count-functions.html) or [rare](https://www.elastic.co/guide/en/machine-learning/current/ml-rare-functions.html) functions, set the interval to the same value as the bucket span.
-* If you have multiple influencers or partition fields or if your field cardinality is more than 1000, use [composite aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html).
+* When your detectors use [metric](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-metric-functions.md) or [sum](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-sum-functions.md) analytical functions, it’s recommended to set the `date_histogram` or `composite` aggregation interval to a tenth of the bucket span. This creates finer, more granular time buckets, which are ideal for this type of analysis.
+* When your detectors use [count](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-count-functions.md) or [rare](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-rare-functions.md) functions, set the interval to the same value as the bucket span.
+* If you have multiple influencers or partition fields or if your field cardinality is more than 1000, use [composite aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-composite-aggregation.md).

To determine the cardinality of your data, you can run searches such as:

@@ -254,7 +254,7 @@ Use the following format to define a composite aggregation in your {{dfeed}}:
You can also use complex nested aggregations in {{dfeeds}}.

-The next example uses the [`derivative` pipeline aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html) to find the first order derivative of the counter `system.network.out.bytes` for each value of the field `beat.name`.
+The next example uses the [`derivative` pipeline aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-derivative-aggregation.md) to find the first order derivative of the counter `system.network.out.bytes` for each value of the field `beat.name`.

::::{note}
`derivative` or other pipeline aggregations may not work within `composite` aggregations. See [composite aggregations and pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html#search-aggregations-bucket-composite-aggregation-pipeline-aggregations).

@@ -346,7 +346,7 @@ You can also use single bucket aggregations in {{dfeeds}}. The following example
It is not currently possible to use `aggregate_metric_double` type fields in {{dfeeds}} without aggregations.
::::

-You can use fields with the [`aggregate_metric_double`](https://www.elastic.co/guide/en/elasticsearch/reference/current/aggregate-metric-double.html) field type in a {{dfeed}} with aggregations. It is required to retrieve the `value_count` of the `aggregate_metric_double` filed in an aggregation and then use it as the `summary_count_field_name` to provide the correct count that represents the aggregation value.
+You can use fields with the [`aggregate_metric_double`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/aggregate-metric-double.md) field type in a {{dfeed}} with aggregations. You must retrieve the `value_count` of the `aggregate_metric_double` field in an aggregation and then use it as the `summary_count_field_name` to provide the correct count that represents the aggregation value.

In the following example, `presum` is an `aggregate_metric_double` type field that has all the possible metrics: `[ min, max, sum, value_count ]`. To use an `avg` aggregation on this field, you need to perform a `value_count` aggregation on `presum` and then set the field that contains the aggregated values `my_count` as the `summary_count_field_name`:
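A rough sketch of the tenth-of-the-bucket-span recommendation from the hunk above (not of the elided `presum` example): assuming a job `traffic` with a 15-minute bucket span and a `network-metrics` index, the {{dfeed}}'s `date_histogram` uses a 90-second interval, and a nested `max` aggregation on the time field tells the job the timestamp of each aggregated bucket:

```console
PUT _ml/datafeeds/datafeed-traffic
{
  "job_id": "traffic",
  "indices": ["network-metrics"],
  "aggregations": {
    "buckets": {
      "date_histogram": { "field": "@timestamp", "fixed_interval": "90s" },
      "aggregations": {
        "@timestamp": { "max": { "field": "@timestamp" } },
        "bytes_sum": { "sum": { "field": "bytes" } }
      }
    }
  }
}
```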
diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-categories.md b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-categories.md
index cb39632fbe..13bbad0173 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-categories.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-categories.md
@@ -8,7 +8,7 @@ mapped_pages:

# Detecting anomalous categories of data [ml-configuring-categories]

-Categorization is a {{ml}} process that tokenizes a text field, clusters similar data together, and classifies it into categories. It works best on machine-written messages and application output that typically consist of repeated elements. [Categorization jobs](ml-anomaly-detection-job-types.md#categorization-jobs) enable you to find anomalous behavior in your categorized data. Categorization is not natural language processing (NLP). When you create a categorization {{anomaly-job}}, the {{ml}} model learns what volume and pattern is normal for each category over time. You can then detect anomalies and surface rare events or unusual types of messages by using [count](https://www.elastic.co/guide/en/machine-learning/current/ml-count-functions.html) or [rare](https://www.elastic.co/guide/en/machine-learning/current/ml-rare-functions.html) functions. Categorization works well on finite set of possible messages, for example:
+Categorization is a {{ml}} process that tokenizes a text field, clusters similar data together, and classifies it into categories. It works best on machine-written messages and application output that typically consist of repeated elements. [Categorization jobs](ml-anomaly-detection-job-types.md#categorization-jobs) enable you to find anomalous behavior in your categorized data. Categorization is not natural language processing (NLP). When you create a categorization {{anomaly-job}}, the {{ml}} model learns what volume and pattern is normal for each category over time. You can then detect anomalies and surface rare events or unusual types of messages by using [count](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-count-functions.md) or [rare](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-rare-functions.md) functions. Categorization works well on a finite set of possible messages, for example:

```js
{"@timestamp":1549596476000,

@@ -84,7 +84,7 @@ Another advanced option is the `categorization_filters` property, which can cont
## Per-partition categorization [ml-per-partition-categorization]
-If you enable per-partition categorization, categories are determined independently for each partition. For example, if your data includes messages from multiple types of logs from different applications, you can use a field like the ECS [`event.dataset` field](https://www.elastic.co/guide/en/ecs/current/ecs-event.html) as the `partition_field_name` and categorize the messages for each type of log separately.
+If you enable per-partition categorization, categories are determined independently for each partition. For example, if your data includes messages from multiple types of logs from different applications, you can use a field like the ECS [`event.dataset` field](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-event.md) as the `partition_field_name` and categorize the messages for each type of log separately.

If your job has multiple detectors, every detector that uses the `mlcategory` keyword must also define a `partition_field_name`. You must use the same `partition_field_name` value in all of these detectors. Otherwise, when you create or update a job and enable per-partition categorization, it fails.

@@ -101,7 +101,7 @@ If you use the categorization wizard in {{kib}}, you can see which categorizatio
:class: screenshot
:::

-The categorization analyzer can refer to a built-in {{es}} analyzer or a combination of zero or more character filters, a tokenizer, and zero or more token filters. In this example, adding a [`pattern_replace` character filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-pattern-replace-charfilter.html) achieves the same behavior as the `categorization_filters` job configuration option described earlier. For more details about these properties, refer to the [`categorization_analyzer` API object](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job#ml-put-job-request-body).
+The categorization analyzer can refer to a built-in {{es}} analyzer or a combination of zero or more character filters, a tokenizer, and zero or more token filters. In this example, adding a [`pattern_replace` character filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-pattern-replace-charfilter.md) achieves the same behavior as the `categorization_filters` job configuration option described earlier. For more details about these properties, refer to the [`categorization_analyzer` API object](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job#ml-put-job-request-body).

If you use the default categorization analyzer in {{kib}} or omit the `categorization_analyzer` property from the API, the following default values are used:

@@ -137,7 +137,7 @@ POST _ml/anomaly_detectors/_validate
If you specify any part of the `categorization_analyzer`, however, any omitted sub-properties are *not* set to default values.
-The `ml_standard` tokenizer and the day and month stopword filter are almost equivalent to the following analyzer, which is defined using only built-in {{es}} [tokenizers](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenizers.html) and [token filters](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenfilters.html):
+The `ml_standard` tokenizer and the day and month stopword filter are almost equivalent to the following analyzer, which is defined using only built-in {{es}} [tokenizers](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/tokenizer-reference.md) and [token filters](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/token-filter-reference.md):

```console
PUT _ml/anomaly_detectors/it_ops_new_logs

diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-populations.md b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-populations.md
index 30144f14a3..5fb1740d63 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-populations.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-populations.md
@@ -22,7 +22,7 @@ Population analysis is resource-efficient and scales well, enabling the analysis
## Creating population jobs [creating-population-jobs]

-1. In {{kib}}, navigate to **Jobs**. To open **Jobs**, find **{{ml-app}} > Anomaly Detection** in the main menu, or use the [global search field](https://www.elastic.co/guide/en/kibana/current/kibana-concepts-analysts.html#_finding_your_apps_and_objects).
+1. In {{kib}}, navigate to **Jobs**. To open **Jobs**, find **{{ml-app}} > Anomaly Detection** in the main menu, or use the [global search field](/explore-analyze/query-filter/filtering.md#_finding_your_apps_and_objects).
2. Click **Create job**, select the {{data-source}} you want to analyze.
3. Select the **Population** wizard from the list.
4. Choose a population field - it’s the `clientip` field in this example - and the metric you want to use for the analysis - `Mean(bytes)` in this example.
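The API equivalent of these wizard steps, as a sketch (the wizard generates something similar; the job id is invented). The `over_field_name` is what makes this a population analysis, modeling each `clientip` against the behavior of the whole population:

```console
PUT _ml/anomaly_detectors/population_web
{
  "analysis_config": {
    "bucket_span": "15m",
    "detectors": [
      {
        "function": "mean",
        "field_name": "bytes",
        "over_field_name": "clientip"
      }
    ],
    "influencers": ["clientip"]
  },
  "data_description": { "time_field": "@timestamp" }
}
```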
diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md
index fa28dfcc9b..6545a10148 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md
@@ -74,7 +74,7 @@ PUT /my-index-000001/_doc/1
}
```

-1. In this example, string fields are mapped as `keyword` fields to support aggregation. If you want both a full text (`text`) and a keyword (`keyword`) version of the same field, use multi-fields. For more information, see [fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html).
+1. In this example, string fields are mapped as `keyword` fields to support aggregation. If you want both a full text (`text`) and a keyword (`keyword`) version of the same field, use multi-fields. For more information, see [fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md).

$$$ml-configuring-transform1$$$

@@ -380,7 +380,7 @@ PUT _ml/anomaly_detectors/test3
GET _ml/datafeeds/datafeed-test3/_preview
```

-In {{es}}, location data can be stored in `geo_point` fields but this data type is not supported natively in {{ml}} analytics. This example of a runtime field transforms the data into an appropriate format. For more information, see [Geographic functions](https://www.elastic.co/guide/en/machine-learning/current/ml-geo-functions.html).
+In {{es}}, location data can be stored in `geo_point` fields but this data type is not supported natively in {{ml}} analytics. This example of a runtime field transforms the data into an appropriate format. For more information, see [Geographic functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-geo-functions.md).

The preview {{dfeed}} API returns the following results, which show that `41.44` and `90.5` have been combined into "41.44,90.5":

diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-functions.md b/explore-analyze/machine-learning/anomaly-detection/ml-functions.md
index bf3aad97d6..f3f83f130f 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-functions.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-functions.md
@@ -18,10 +18,10 @@ You can specify a `summary_count_field_name` with any function except `metric`.
If your data is sparse, there may be gaps in the data which means you might have empty buckets. You might want to treat these as anomalies or you might want these gaps to be ignored. Your decision depends on your use case and what is important to you. It also depends on which functions you use. The `sum` and `count` functions are strongly affected by empty buckets. For this reason, there are `non_null_sum` and `non_zero_count` functions, which are tolerant to sparse data. These functions effectively ignore empty buckets.

-* [Count functions](https://www.elastic.co/guide/en/machine-learning/current/ml-count-functions.html)
-* [Geographic functions](https://www.elastic.co/guide/en/machine-learning/current/ml-geo-functions.html)
-* [Information content functions](https://www.elastic.co/guide/en/machine-learning/current/ml-info-functions.html)
-* [Metric functions](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html)
-* [Rare functions](https://www.elastic.co/guide/en/machine-learning/current/ml-rare-functions.html)
-* [Sum functions](https://www.elastic.co/guide/en/machine-learning/current/ml-sum-functions.html)
-* [Time functions](https://www.elastic.co/guide/en/machine-learning/current/ml-time-functions.html)
+* [Count functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-count-functions.md)
+* [Geographic functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-geo-functions.md)
+* [Information content functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-info-functions.md)
+* [Metric functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-metric-functions.md)
+* [Rare functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-rare-functions.md)
+* [Sum functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-sum-functions.md)
+* [Time functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-time-functions.md)
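A small sketch of the sparse-data point above: a detector that uses `non_zero_count`, so empty buckets are effectively ignored rather than flagged as anomalies (the job id and bucket span are invented):

```console
PUT _ml/anomaly_detectors/sparse_events
{
  "analysis_config": {
    "bucket_span": "1h",
    "detectors": [
      { "function": "non_zero_count" }
    ]
  },
  "data_description": { "time_field": "@timestamp" }
}
```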
diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md b/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md
index d3891fe7d2..9776a6cb94 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md
@@ -50,7 +50,7 @@ To get the best results from {{ml}} analytics, you must understand your data. Yo
6. Optional: You can change the random sampling behavior, which affects the number of documents per shard that are used in the {{data-viz}}. You can use automatic random sampling that balances accuracy and speed, manual sampling where you can choose a value for the sampling percentage, or you can turn the feature off to use the full data set. There is a relatively small number of documents in the {{kib}} sample data, so you can turn random sampling off. For larger data sets, keep in mind that using a large sample size increases query run times and increases the load on the cluster.
7. Explore the fields in the {{data-viz}}.

- You can filter the list by field names or [field types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html). The {{data-viz}} indicates how many of the documents in the sample for the selected time period contain each field.
+ You can filter the list by field names or [field types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md). The {{data-viz}} indicates how many of the documents in the sample for the selected time period contain each field.

In particular, look at the `clientip`, `response.keyword`, and `url.keyword` fields, since we’ll use them in our {{anomaly-jobs}}. For these fields, the {{data-viz}} provides the number of distinct values, a list of the top values, and the number and percentage of documents that contain the field. For example:

:::{image} ../../../images/machine-learning-ml-gs-data-keyword.jpg

@@ -95,7 +95,7 @@ The job uses *buckets* to divide the time series into batches for processing. Fo
Each {{anomaly-job}} contains one or more *detectors*, which define the type of analysis that occurs (for example, `max`, `average`, or `rare` analytical functions) and the fields that are analyzed. Some of the analytical functions look for single anomalous data points. For example, `max` identifies the maximum value that is seen within a bucket. Others perform some aggregation over the length of the bucket. For example, `mean` calculates the mean of all the data points seen within the bucket.

-For more information, see [{{dfeeds-cap}}](ml-ad-run-jobs.md#ml-ad-datafeeds), [Buckets](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-create-job), and [*Function reference*](ml-functions.md).
+For more information, see [{{dfeeds-cap}}](ml-ad-run-jobs.md#ml-ad-datafeeds), [Buckets](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-create-job), and [*Function reference*](ml-functions.md).

::::

@@ -271,7 +271,7 @@ To create a forecast in {{kib}}:
:class: screenshot
:::

-3. Specify a duration for your forecast. This value indicates how far to extrapolate beyond the last record that was processed. You must use [time units](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units). In this example, the duration is one week (`1w`):
+3. Specify a duration for your forecast. This value indicates how far to extrapolate beyond the last record that was processed. You must use [time units](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units). In this example, the duration is one week (`1w`):

:::{image} ../../../images/machine-learning-ml-gs-duration.png
:alt: Specify a duration of 1w
:class: screenshot

@@ -311,8 +311,8 @@ If you’re now thinking about where {{anomaly-detect}} can be most impactful fo
2. It should be information that contains key performance indicators for the health, security, or success of your business or system. The better you know the data, the quicker you will be able to create jobs that generate useful insights.
3. Ideally, the data is located in {{es}} and you can therefore create a {{dfeed}} that retrieves data in real time. If your data is outside of {{es}}, you cannot use {{kib}} to create your jobs and you cannot use {{dfeeds}}.

-In general, it is a good idea to start with single metric {{anomaly-jobs}} for your key performance indicators. After you examine these simple analysis results, you will have a better idea of what the influencers might be. You can create multi-metric jobs and split the data or create more complex analysis functions as necessary. For examples of more complicated configuration options, see [Examples](https://www.elastic.co/guide/en/machine-learning/current/anomaly-how-tos.html).
+In general, it is a good idea to start with single metric {{anomaly-jobs}} for your key performance indicators. After you examine these simple analysis results, you will have a better idea of what the influencers might be. You can create multi-metric jobs and split the data or create more complex analysis functions as necessary. For examples of more complicated configuration options, see [Examples](/explore-analyze/machine-learning/anomaly-detection/anomaly-how-tos.md).

-If you want to find more sample jobs, see [Supplied configurations](ootb-ml-jobs.md). In particular, there are sample jobs for [Apache](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-apache.html) and [Nginx](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-nginx.html) that are quite similar to the examples in this tutorial.
+If you want to find more sample jobs, see [Supplied configurations](ootb-ml-jobs.md). In particular, there are sample jobs for [Apache](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-apache.md) and [Nginx](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-nginx.md) that are quite similar to the examples in this tutorial.

If you encounter problems, we’re here to help. If you are an existing Elastic customer with a support contract, please create a ticket in the [Elastic Support portal](http://support.elastic.co). Or post in the [Elastic forum](https://discuss.elastic.co/).
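The forecast button in {{kib}} maps to the forecast API; as a sketch, with a hypothetical job id and the one-week duration from the tutorial:

```console
POST _ml/anomaly_detectors/low_request_rate/_forecast
{
  "duration": "1w"
}
```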
diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-limitations.md b/explore-analyze/machine-learning/anomaly-detection/ml-limitations.md
index 3cade2a565..c20c13bcba 100644
--- a/explore-analyze/machine-learning/anomaly-detection/ml-limitations.md
+++ b/explore-analyze/machine-learning/anomaly-detection/ml-limitations.md
@@ -20,7 +20,7 @@ The following limitations and known problems apply to the 9.0.0-beta1 release of
### CPUs must support SSE4.2 [ml-limitations-sse]

-{{ml-cap}} uses Streaming SIMD Extensions (SSE) 4.2 instructions, so it works only on machines whose CPUs [support](https://en.wikipedia.org/wiki/SSE4#Supporting_CPUs) SSE4.2. If you run {{es}} on older hardware you must disable {{ml}} by setting `xpack.ml.enabled` to `false`. See [{{ml-cap}} settings in {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html).
+{{ml-cap}} uses Streaming SIMD Extensions (SSE) 4.2 instructions, so it works only on machines whose CPUs [support](https://en.wikipedia.org/wiki/SSE4#Supporting_CPUs) SSE4.2. If you run {{es}} on older hardware you must disable {{ml}} by setting `xpack.ml.enabled` to `false`. See [{{ml-cap}} settings in {{es}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md).

### CPU scheduling improvements apply to Linux and MacOS only [ml-scheduling-priority]

@@ -40,7 +40,7 @@ If you send pre-aggregated data to a job for analysis, you must ensure that the
### Scripted metric aggregations are not supported [_scripted_metric_aggregations_are_not_supported]

-Using [scripted metric aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html) in {{dfeeds}} is not supported. Refer to the [Aggregating data for faster performance](ml-configuring-aggregation.md) page to learn more about aggregations in {{dfeeds}}.
+Using [scripted metric aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) in {{dfeeds}} is not supported. Refer to the [Aggregating data for faster performance](ml-configuring-aggregation.md) page to learn more about aggregations in {{dfeeds}}.

### Fields named "by", "count", or "over" cannot be used to split data [_fields_named_by_count_or_over_cannot_be_used_to_split_data]

@@ -124,7 +124,7 @@ In {{kib}}, **Anomaly Explorer** and **Single Metric Viewer** charts are not dis
* for anomalies that were due to categorization (if model plot is not enabled),
* if the {{dfeed}} uses scripted fields and model plot is not enabled (except for scripts that define metric fields),
-* if the {{dfeed}} uses [composite aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html) that have composite sources other than `terms` and `date_histogram`,
+* if the {{dfeed}} uses [composite aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-composite-aggregation.md) that have composite sources other than `terms` and `date_histogram`,
* if your [{{dfeed}} uses aggregations with nested `terms` aggs](ml-configuring-aggregation.md#aggs-dfeeds) and model plot is not enabled,
* `freq_rare` functions,
* `info_content`, `high_info_content`, `low_info_content` functions,

@@ -138,22 +138,22 @@ The charts can also look odd in circumstances where there is very little data to

| Detector functions | Function description | Supported |
| --- | --- | --- |
-| count, high_count, low_count, non_zero_count, low_non_zero_count | [Count functions](https://www.elastic.co/guide/en/machine-learning/current/ml-count-functions.html) | yes |
+| count, high_count, low_count, non_zero_count, low_non_zero_count | [Count functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-count-functions.md) | yes |
| count, high_count, low_count, non_zero_count, low_non_zero_count with summary_count_field_name that is not doc_count (model plot not enabled) | [Count functions](https://www.elastic.co/guide/en/machine-learning/current/ml-count-functions.html) | yes |
| non_zero_count with summary_count_field that is not doc_count using cardinality aggregation in datafeed config (model plot not enabled) | [Count functions](https://www.elastic.co/guide/en/machine-learning/current/ml-count-functions.html) | yes |
| distinct_count, high_distinct_count, low_distinct_count | [Count functions](https://www.elastic.co/guide/en/machine-learning/current/ml-count-functions.html) | yes |
-| mean, high_mean, low_mean | [Mean, high_mean, low_mean](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html#ml-metric-mean) | yes |
+| mean, high_mean, low_mean | [Mean, high_mean, low_mean](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-metric-functions.md#ml-metric-mean) | yes |
| min | [Min](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html#ml-metric-min) | yes |
| max | [Max](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html#ml-metric-max) | yes |
| metric | [Metric](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html#ml-metric-metric) | yes |
| median, high_median, low_median | [Median, high_median, low_median](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html#ml-metric-median) | yes |
-| sum, high_sum ,low_sum, non_null_sum, high_non_null_sum, low_non_null_sum | [Sum functions](https://www.elastic.co/guide/en/machine-learning/current/ml-sum-functions.html) | yes |
+| sum, high_sum, low_sum, non_null_sum, high_non_null_sum, low_non_null_sum | [Sum functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-sum-functions.md) | yes |
| varp, high_varp, low_varp | [Varp, high_varp, low_varp](https://www.elastic.co/guide/en/machine-learning/current/ml-metric-functions.html#ml-metric-varp) | yes (only if model plot is enabled) |
-| lat_long | [Lat_long](https://www.elastic.co/guide/en/machine-learning/current/ml-geo-functions.html#ml-lat-long) | no (but map is displayed in the Anomaly Explorer) |
-| info_content, high_info_content, low_info_content | [Info_content, High_info_content, Low_info_content](https://www.elastic.co/guide/en/machine-learning/current/ml-info-functions.html#ml-info-content) | yes (only if model plot is enabled) |
-| rare | [Rare](https://www.elastic.co/guide/en/machine-learning/current/ml-rare-functions.html#ml-rare) | yes |
+| lat_long | [Lat_long](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-geo-functions.md#ml-lat-long) | no (but map is displayed in the Anomaly Explorer) |
+| info_content, high_info_content, low_info_content | [Info_content, High_info_content, Low_info_content](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-info-functions.md#ml-info-content) | yes (only if model plot is enabled) |
+| rare | [Rare](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-rare-functions.md#ml-rare) | yes |
| freq_rare | [Freq_rare](https://www.elastic.co/guide/en/machine-learning/current/ml-rare-functions.html#ml-freq-rare) | no |
-| time_of_day, time_of_week | [Time functions](https://www.elastic.co/guide/en/machine-learning/current/ml-time-functions.html) | no |
+| time_of_day, time_of_week | [Time functions](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ml-time-functions.md) | no |

### Jobs created in {{kib}} must use {{dfeeds}} [_jobs_created_in_kib_must_use_dfeeds]
the job do ### Calendars and filters are visible in all {{kib}} spaces [ml-space-limitations] -[Spaces](../../../deploy-manage/manage-spaces.md) enable you to organize your {{anomaly-jobs}} in {{kib}} and to see only the jobs and other saved objects that belong to your space. However, this limited scope does not apply to [calendars](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-calendars) and [filters](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-rules); they are visible in all spaces. +[Spaces](../../../deploy-manage/manage-spaces.md) enable you to organize your {{anomaly-jobs}} in {{kib}} and to see only the jobs and other saved objects that belong to your space. However, this limited scope does not apply to [calendars](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-calendars) and [filters](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-rules); they are visible in all spaces. ### Rollup indices are not supported in {{kib}} [ml-rollup-limitations] diff --git a/explore-analyze/machine-learning/anomaly-detection/move-jobs.md b/explore-analyze/machine-learning/anomaly-detection/move-jobs.md index 9b513834ff..bb21898c95 100644 --- a/explore-analyze/machine-learning/anomaly-detection/move-jobs.md +++ b/explore-analyze/machine-learning/anomaly-detection/move-jobs.md @@ -15,6 +15,6 @@ The exported file contains configuration details; it does not contain the {{ml}} There are some additional actions that you must take before you can successfully import and run your jobs: -1. The {{kib}} [{{data-sources}}](https://www.elastic.co/guide/en/kibana/current/data-views.html) that are used by {{anomaly-detect}} {{dfeeds}} and {{dfanalytics}} source indices must exist; otherwise, the import fails. +1. The {{kib}} [{{data-sources}}](/explore-analyze/find-and-organize/data-views.md) that are used by {{anomaly-detect}} {{dfeeds}} and {{dfanalytics}} source indices must exist; otherwise, the import fails. 2. If your {{anomaly-jobs}} use [custom rules](ml-configuring-detector-custom-rules.md) with filter lists, the filter lists must exist; otherwise, the import fails. To create filter lists, use {{kib}} or the [create filters API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter). -3. If your {{anomaly-jobs}} were associated with [calendars](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-calendars), you must create the calendar in the new environment and add your imported jobs to the calendar. Use {{kib}} or the [create calendars](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar), [add events to calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events), and [add jobs to calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job) APIs. +3. If your {{anomaly-jobs}} were associated with [calendars](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-calendars), you must create the calendar in the new environment and add your imported jobs to the calendar.
Use {{kib}} or the [create calendars](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar), [add events to calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events), and [add jobs to calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job) APIs. diff --git a/explore-analyze/machine-learning/anomaly-detection/ootb-ml-jobs.md b/explore-analyze/machine-learning/anomaly-detection/ootb-ml-jobs.md index 4670c783f0..d1c79bfe5a 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ootb-ml-jobs.md +++ b/explore-analyze/machine-learning/anomaly-detection/ootb-ml-jobs.md @@ -9,17 +9,17 @@ mapped_pages: # Supplied configurations [ootb-ml-jobs] -{{anomaly-jobs-cap}} contain the configuration information and metadata necessary to perform an analytics task. {{kib}} can recognize certain types of data and provide specialized wizards for that context. This page lists the categories of the {{anomaly-jobs}} that are ready to use via {{kib}} in **Machine learning**. Refer to [Create {{anomaly-jobs}}](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-create-job) to learn more about creating a job by using supplied configurations. Logs and Metrics supplied configurations are available and can be created via the related solution UI in {{kib}}. +{{anomaly-jobs-cap}} contain the configuration information and metadata necessary to perform an analytics task. {{kib}} can recognize certain types of data and provide specialized wizards for that context. This page lists the categories of the {{anomaly-jobs}} that are ready to use via {{kib}} in **Machine learning**. Refer to [Create {{anomaly-jobs}}](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-create-job) to learn more about creating a job by using supplied configurations. Logs and Metrics supplied configurations are available and can be created via the related solution UI in {{kib}}. 
-* [Apache](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-apache.html) -* [APM](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-apm.html) -* [{{auditbeat}}](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-auditbeat.html) -* [Logs](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-logs-ui.html) -* [{{metricbeat}}](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-metricbeat.html) -* [Metrics](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-metrics-ui.html) -* [Nginx](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-nginx.html) -* [Security](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-siem.html) -* [Uptime](https://www.elastic.co/guide/en/machine-learning/current/ootb-ml-jobs-uptime.html) +* [Apache](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-apache.md) +* [APM](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-apm.md) +* [{{auditbeat}}](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-auditbeat.md) +* [Logs](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-logs-ui.md) +* [{{metricbeat}}](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-metricbeat.md) +* [Metrics](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-metrics-ui.md) +* [Nginx](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-nginx.md) +* [Security](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-siem.md) +* [Uptime](asciidocalypse://docs/docs-content/docs/reference/data-analysis/machine-learning/ootb-ml-jobs-uptime.md) ::::{note} The configurations are only available if data exists that matches the queries specified in the manifest files. These recognizer queries are linked in the descriptions of the individual configurations. diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md index 5a1d33f533..864c15f64b 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md @@ -193,13 +193,13 @@ For instance, suppose you have an online service and you would like to predict w {{infer-cap}} can be used as a processor specified in an [ingest pipeline](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). It uses a trained model to infer against the data that is being ingested in the pipeline. The model is used on the ingest node. {{infer-cap}} pre-processes the data by using the model and provides a prediction. After the process, the pipeline continues executing (if there is any other processor in the pipeline), finally the new data together with the results are indexed into the destination index. -Check the [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-processor.html) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. 
+Check the [{{infer}} processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/inference-processor.md) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. #### {{infer-cap}} aggregation [ml-inference-aggregation-class] {{infer-cap}} can also be used as a pipeline aggregation. You can reference a trained model in the aggregation to infer on the result field of the parent bucket aggregation. The {{infer}} aggregation uses the model on the results to provide a prediction. This aggregation enables you to run {{classification}} or {{reganalysis}} at search time. If you want to perform the analysis on a small set of data, this aggregation enables you to generate predictions without the need to set up a processor in the ingest pipeline. -Check the [{{infer}} bucket aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-inference-bucket-aggregation.html) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. +Check the [{{infer}} bucket aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-inference-bucket-aggregation.md) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. ::::{note} If you use trained model aliases to reference your trained model in an {{infer}} processor or {{infer}} aggregation, you can replace your trained model with a new one without the need of updating the processor or the aggregation. Reassign the alias you used to a new trained model ID by using the [Create or update trained model aliases API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias). The new trained model needs to use the same type of {{dfanalytics}} as the old one. diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-limitations.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-limitations.md index d5975cdec1..3200fbb6c1 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-limitations.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-limitations.md @@ -37,7 +37,7 @@ You cannot update {{dfanalytics}} configurations. Instead, delete the {{dfanalyt ### {{dfanalytics-cap}} memory limitation [dfa-dataframe-size-limitations] -{{dfanalytics-cap}} can only perform analyses that fit into the memory available for {{ml}}. Overspill to disk is not currently possible. For general {{ml}} settings, see [{{ml-cap}} settings in {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html). +{{dfanalytics-cap}} can only perform analyses that fit into the memory available for {{ml}}. Overspill to disk is not currently possible. For general {{ml}} settings, see [{{ml-cap}} settings in {{es}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md). When you create a {{dfanalytics-job}} and the inference step of the process fails due to the model is too large to fit into JVM, follow the steps in [this GitHub issue](https://github.com/elastic/elasticsearch/issues/76093) for a workaround. 
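The {{infer}} processor that these pages point to can be sketched as a minimal ingest pipeline definition. The following is an illustrative sketch only; the pipeline name, model ID, and field names are hypothetical placeholders rather than values taken from the pages being edited:

```console
PUT _ingest/pipeline/my_inference_pipeline
{
  "processors": [
    {
      "inference": {
        "model_id": "my_trained_model", <1>
        "target_field": "ml.inference", <2>
        "field_map": {
          "message": "text_field" <3>
        }
      }
    }
  ]
}
```

1. The ID (or alias) of a trained model that is already deployed in the cluster; `my_trained_model` is a placeholder.
2. The field where the prediction is written before the document is indexed.
3. Maps the incoming `message` field to the input field name the model expects.

Documents ingested through such a pipeline are indexed together with the model's prediction, which is the behavior the {{infer}} processor paragraphs above describe.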
diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md index 320336d6a7..d2210b155f 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md @@ -139,13 +139,13 @@ For instance, suppose you have an online service and you would like to predict w {{infer-cap}} can be used as a processor specified in an [ingest pipeline](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). It uses a trained model to infer against the data that is being ingested in the pipeline. The model is used on the ingest node. {{infer-cap}} pre-processes the data by using the model and provides a prediction. After the process, the pipeline continues executing (if there is any other processor in the pipeline), finally the new data together with the results are indexed into the destination index. -Check the [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-processor.html) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. +Check the [{{infer}} processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/inference-processor.md) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. #### {{infer-cap}} aggregation [ml-inference-aggregation-reg] {{infer-cap}} can also be used as a pipeline aggregation. You can reference a trained model in the aggregation to infer on the result field of the parent bucket aggregation. The {{infer}} aggregation uses the model on the results to provide a prediction. This aggregation enables you to run {{classification}} or {{reganalysis}} at search time. If you want to perform the analysis on a small set of data, this aggregation enables you to generate predictions without the need to set up a processor in the ingest pipeline. -Check the [{{infer}} bucket aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-inference-bucket-aggregation.html) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. +Check the [{{infer}} bucket aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-inference-bucket-aggregation.md) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. ::::{note} If you use trained model aliases to reference your trained model in an {{infer}} processor or {{infer}} aggregation, you can replace your trained model with a new one without the need of updating the processor or the aggregation. Reassign the alias you used to a new trained model ID by using the [Create or update trained model aliases API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias). The new trained model needs to use the same type of {{dfanalytics}} as the old one. 
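The trained model alias note that both the classification and regression pages carry corresponds to a single API call. A minimal sketch, assuming a hypothetical alias `my_model_alias` that currently points at an older model and a replacement model with the ID `my_new_model`:

```console
PUT _ml/trained_models/my_new_model/model_aliases/my_model_alias?reassign=true
```

Because the alias already refers to another model, the `reassign=true` parameter is required. Any {{infer}} processor or {{infer}} aggregation that references `my_model_alias` then uses the new model without a configuration change, provided the new model performs the same type of {{dfanalytics}} as the old one.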
diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md b/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md index fbc7fa983b..343ed90184 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md @@ -106,7 +106,7 @@ A few observations: ::::{note} -* Models exported from the [get trained models API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models) are limited in size by the [http.max_content_length](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html) global configuration value in {{es}}. The default value is `100mb` and may need to be increased depending on the size of model being exported. +* Models exported from the [get trained models API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models) are limited in size by the [http.max_content_length](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md) global configuration value in {{es}}. The default value is `100mb` and may need to be increased depending on the size of model being exported. * Connection timeouts can occur, for example, when model sizes are very large or your cluster is under load. If needed, you can increase [timeout configurations](https://ec.haxx.se/usingcurl/usingcurl-timeouts) for `curl` (for example, `curl --max-time 600`) or your client of choice. :::: @@ -115,4 +115,4 @@ If you also want to copy the {{dfanalytics-job}} to the new cluster, you can exp ## Importing an external model to the {{stack}} [import-external-model-to-es] -It is possible to import a model to your {{es}} cluster even if the model is not trained by Elastic {{dfanalytics}}. Eland supports [importing models](https://www.elastic.co/guide/en/elasticsearch/client/eland/current/machine-learning.html) directly through its APIs. Please refer to the latest [Eland documentation](https://eland.readthedocs.io/en/latest/index.md) for more information on supported model types and other details of using Eland to import models with. +It is possible to import a model to your {{es}} cluster even if the model is not trained by Elastic {{dfanalytics}}. Eland supports [importing models](asciidocalypse://docs/eland/docs/reference/elasticsearch/elasticsearch-client-eland/machine-learning.md) directly through its APIs. Please refer to the latest [Eland documentation](https://eland.readthedocs.io/en/latest/index.html) for more information on supported model types and other details of using Eland to import models. diff --git a/explore-analyze/machine-learning/machine-learning-in-kibana.md b/explore-analyze/machine-learning/machine-learning-in-kibana.md index 5525d86f97..aa2b5f7f2e 100644 --- a/explore-analyze/machine-learning/machine-learning-in-kibana.md +++ b/explore-analyze/machine-learning/machine-learning-in-kibana.md @@ -35,7 +35,7 @@ File formats supported up to 60 MB: * Rich Text (RTF) * Open Document Format (ODF) -The **{{data-viz}}** identifies the file format and field mappings, and you can import the data into an {{es}} index. To change the default file size limit, see [`fileUpload:maxFileSize`](https://www.elastic.co/guide/en/kibana/current/advanced-options.html#kibana-general-settings) in advanced settings.
+The **{{data-viz}}** identifies the file format and field mappings, and you can import the data into an {{es}} index. To change the default file size limit, see [`fileUpload:maxFileSize`](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md#kibana-general-settings) in advanced settings. If {{stack-security-features}} are enabled, users must have the necessary privileges to use {{ml-features}}. Refer to [Set up {{ml-features}}](setting-up-machine-learning.md#setup-privileges). diff --git a/explore-analyze/machine-learning/machine-learning-in-kibana/inference-processing.md b/explore-analyze/machine-learning/machine-learning-in-kibana/inference-processing.md index 97e0ac8807..c6afe4130f 100644 --- a/explore-analyze/machine-learning/machine-learning-in-kibana/inference-processing.md +++ b/explore-analyze/machine-learning/machine-learning-in-kibana/inference-processing.md @@ -35,7 +35,7 @@ Most commonly used to detect entities such as People, Places, and Organization i ### Text embedding [ingest-pipeline-search-inference-text-embedding] -Analyzing a text field using a [Text embedding](../nlp/ml-nlp-search-compare.md#ml-nlp-text-embedding) model will generate a [dense vector](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html) representation of the text. This array of numeric values encodes the semantic *meaning* of the text. Using the same model with a user’s search query will produce a vector that can then be used to search, ranking results based on vector similarity - semantic similarity - as opposed to traditional word or text similarity. +Analyzing a text field using a [Text embedding](../nlp/ml-nlp-search-compare.md#ml-nlp-text-embedding) model will generate a [dense vector](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md) representation of the text. This array of numeric values encodes the semantic *meaning* of the text. Using the same model with a user’s search query will produce a vector that can then be used to search, ranking results based on vector similarity - semantic similarity - as opposed to traditional word or text similarity. A common use case is a user searching FAQs, or a support agent searching a knowledge base, where semantically similar content may be indexed with little similarity in phrasing. diff --git a/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-aiops.md b/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-aiops.md index dba274febb..c6e8da9a14 100644 --- a/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-aiops.md +++ b/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-aiops.md @@ -52,7 +52,7 @@ Select a field for categorization and optionally apply any filters that you want This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. :::: -Change point detection uses the [change point aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-change-point-aggregation.html) to detect distribution changes, trend changes, and other statistically significant change points in a metric of your time series data. 
+Change point detection uses the [change point aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-change-point-aggregation.md) to detect distribution changes, trend changes, and other statistically significant change points in a metric of your time series data. You can find change point detection under **{{ml-app}}** > **AIOps Labs** or by using the [global search field](/explore-analyze/find-and-organize/find-apps-and-objects.md). Here, you can select the {{data-source}} or saved Discover session that you want to analyze. diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-deploy-model.md b/explore-analyze/machine-learning/nlp/ml-nlp-deploy-model.md index 6fc96f9393..a7cc8f489d 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-deploy-model.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-deploy-model.md @@ -37,4 +37,4 @@ For the resource levels when adaptive resources are enabled, refer to <[*Trained Each allocation of a model deployment has a dedicated queue to buffer {{infer}} requests. The size of this queue is determined by the `queue_capacity` parameter in the [start trained model deployment API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment). When the queue reaches its maximum capacity, new requests are declined until some of the queued requests are processed, creating available capacity once again. When multiple ingest pipelines reference the same deployment, the queue can fill up, resulting in rejected requests. Consider using dedicated deployments to prevent this situation. -{{infer-cap}} requests originating from search, such as the [`text_expansion` query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-text-expansion-query.html), have a higher priority compared to non-search requests. The {{infer}} ingest processor generates normal priority requests. If both a search query and an ingest processor use the same deployment, the search requests with higher priority skip ahead in the queue for processing before the lower priority ingest requests. This prioritization accelerates search responses while potentially slowing down ingest where response time is less critical. +{{infer-cap}} requests originating from search, such as the [`text_expansion` query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-text-expansion-query.md), have a higher priority compared to non-search requests. The {{infer}} ingest processor generates normal priority requests. If both a search query and an ingest processor use the same deployment, the search requests with higher priority skip ahead in the queue for processing before the lower priority ingest requests. This prioritization accelerates search responses while potentially slowing down ingest where response time is less critical. diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-elser.md b/explore-analyze/machine-learning/nlp/ml-nlp-elser.md index ea986aa6bc..fda84803da 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-elser.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-elser.md @@ -21,7 +21,7 @@ While ELSER V2 is generally available, ELSER V1 is in [preview] and will remain ## Tokens - not synonyms [elser-tokens] -ELSER expands the indexed and searched passages into collections of terms that are learned to co-occur frequently within a diverse set of training data. 
The terms that the text is expanded into by the model *are not* synonyms for the search terms; they are learned associations capturing relevance. These expanded terms are weighted as some of them are more significant than others. Then the {{es}} [sparse vector](https://www.elastic.co/guide/en/elasticsearch/reference/current/sparse-vector.html) (or [rank features](https://www.elastic.co/guide/en/elasticsearch/reference/current/rank-features.html)) field type is used to store the terms and weights at index time, and to search against later. +ELSER expands the indexed and searched passages into collections of terms that are learned to co-occur frequently within a diverse set of training data. The terms that the text is expanded into by the model *are not* synonyms for the search terms; they are learned associations capturing relevance. These expanded terms are weighted as some of them are more significant than others. Then the {{es}} [sparse vector](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/sparse-vector.md) (or [rank features](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/rank-features.md)) field type is used to store the terms and weights at index time, and to search against later. This approach provides a more understandable search experience compared to vector embeddings. However, attempting to directly interpret the tokens and weights can be misleading, as the expansion essentially results in a vector in a very high-dimensional space. Consequently, certain tokens, especially those with low weight, contain information that is intertwined with other low-weight tokens in the representation. In this regard, they function similarly to a dense vector representation, making it challenging to separate their individual contributions. This complexity can potentially lead to misinterpretations if not carefully considered during analysis. @@ -172,7 +172,7 @@ POST _ml/trained_models/.elser_model_2/deployment/_start?deployment_id=for_searc If you want to deploy ELSER in a restricted or closed network, you have two options: * create your own HTTP/HTTPS endpoint with the model artifacts on it, -* put the model artifacts into a directory inside the config directory on all [master-eligible nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#master-node). +* put the model artifacts into a directory inside the config directory on all [master-eligible nodes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#master-node). ### Model artifact files [elser-model-artifacts] @@ -284,7 +284,7 @@ To learn more about ELSER performance, refer to the [Benchmark information](#els ## Pre-cleaning input text [pre-cleaning] -The quality of the input text significantly affects the quality of the embeddings. To achieve the best results, it’s recommended to clean the input text before generating embeddings. The exact preprocessing you may need to do heavily depends on your text. For example, if your text contains HTML tags, use the [HTML strip processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/htmlstrip-processor.html) in an ingest pipeline to remove unnecessary elements. Always review and clean your input text before ingestion to eliminate any irrelevant entities that might affect the results. +The quality of the input text significantly affects the quality of the embeddings. 
To achieve the best results, it’s recommended to clean the input text before generating embeddings. The exact preprocessing you may need to do heavily depends on your text. For example, if your text contains HTML tags, use the [HTML strip processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/htmlstrip-processor.md) in an ingest pipeline to remove unnecessary elements. Always review and clean your input text before ingestion to eliminate any irrelevant entities that might affect the results. ## Recommendations for using ELSER [elser-recommendations] diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-import-model.md b/explore-analyze/machine-learning/nlp/ml-nlp-import-model.md index eea06fef59..82da77a6b8 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-import-model.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-import-model.md @@ -9,7 +9,7 @@ mapped_pages: # Import the trained model and vocabulary [ml-nlp-import-model] ::::{important} -If you want to install a trained model in a restricted or closed network, refer to [these instructions](https://www.elastic.co/guide/en/elasticsearch/client/eland/current/machine-learning.html#ml-nlp-pytorch-air-gapped). +If you want to install a trained model in a restricted or closed network, refer to [these instructions](asciidocalypse://docs/eland/docs/reference/elasticsearch/elasticsearch-client-eland/machine-learning.md#ml-nlp-pytorch-air-gapped). :::: After you choose a model, you must import it and its tokenizer vocabulary to your cluster. When you import the model, it must be chunked and imported one chunk at a time for storage in parts due to its size. @@ -22,7 +22,7 @@ Trained models must be in a TorchScript representation for use with {{stack-ml-f ## Import with the Eland client installed [ml-nlp-import-script] -1. Install the [Eland Python client](https://www.elastic.co/guide/en/elasticsearch/client/eland/current/installation.html) with PyTorch extra dependencies. +1. Install the [Eland Python client](asciidocalypse://docs/eland/docs/reference/elasticsearch/elasticsearch-client-eland/installation.md) with PyTorch extra dependencies. ```shell python -m pip install 'eland[pytorch]' diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-inference.md b/explore-analyze/machine-learning/nlp/ml-nlp-inference.md index c94ffba544..a9830faa76 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-inference.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-inference.md @@ -25,7 +25,7 @@ In {{kib}}, you can create and edit pipelines in **{{stack-manage-app}}** > **In ::: 1. Click **Create pipeline** or edit an existing pipeline. -2. Add an [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-processor.html) to your pipeline: +2. Add an [{{infer}} processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/inference-processor.md) to your pipeline: 1. Click **Add a processor** and select the **{{infer-cap}}** processor type. 2. Set **Model ID** to the name of your trained model, for example `elastic__distilbert-base-cased-finetuned-conll03-english` or `lang_ident_model_1`. @@ -51,7 +51,7 @@ In {{kib}}, you can create and edit pipelines in **{{stack-manage-app}}** > **In 4. Click **Add** to save the processor. -3. Optional: Add a [set processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/set-processor.html) to index the ingest timestamp. +3. 
Optional: Add a [set processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/set-processor.md) to index the ingest timestamp. 1. Click **Add a processor** and select the **Set** processor type. 2. Choose a name for the field (such as `event.ingested`) and set its value to `{{{_ingest.timestamp}}}`. For more details, refer to [Access ingest metadata in a processor](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md#access-ingest-metadata). @@ -117,7 +117,7 @@ PUT ner-test ``` ::::{tip} -To use the `annotated_text` data type in this example, you must install the [mapper annotated text plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/mapper-annotated-text.html). For more installation details, refer to [Add plugins provided with {{ess}}](https://www.elastic.co/guide/en/cloud/current/ec-adding-elastic-plugins.html). +To use the `annotated_text` data type in this example, you must install the [mapper annotated text plugin](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/mapper-annotated-text.md). For more installation details, refer to [Add plugins provided with {{ess}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/cloud/ec-adding-elastic-plugins.md). :::: You can then use the new pipeline to index some documents. For example, use a bulk indexing request with the `pipeline` query parameter for your NER pipeline: diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md b/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md index 22f238f910..5ef661ad63 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md @@ -12,7 +12,7 @@ The following limitations and known problems apply to the 9.0.0-beta1 release of ## Document size limitations when using `semantic_text` fields [ml-nlp-large-documents-limit-10k-10mb] -When using semantic text to ingest documents, chunking takes place automatically. The number of chunks is limited by the [`index.mapping.nested_objects.limit`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-settings-limit.html) cluster setting, which defaults to 10k. Documents that are too large will cause errors during ingestion. To avoid this issue, please split your documents into roughly 1MB parts before ingestion. +When using semantic text to ingest documents, chunking takes place automatically. The number of chunks is limited by the [`index.mapping.nested_objects.limit`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/mapping-limit-settings.md) cluster setting, which defaults to 10k. Documents that are too large will cause errors during ingestion. To avoid this issue, please split your documents into roughly 1MB parts before ingestion. ## ELSER semantic search is limited to 512 tokens per field that inference is applied to [ml-nlp-elser-v1-limit-512] diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-model-ref.md b/explore-analyze/machine-learning/nlp/ml-nlp-model-ref.md index 0d02b4e113..78bb86f04d 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-model-ref.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-model-ref.md @@ -70,7 +70,7 @@ Sparse embedding models should be configured with the `text_expansion` task type Text Embedding models are designed to work with specific scoring functions for calculating the similarity between the embeddings they produce. 
Examples of typical scoring functions are: `cosine`, `dot product` and `euclidean distance` (also known as `l2_norm`). -The embeddings produced by these models should be indexed in {{es}} using the [dense vector field type](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html) with an appropriate [similarity function](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-params) chosen for the model. +The embeddings produced by these models should be indexed in {{es}} using the [dense vector field type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md) with an appropriate [similarity function](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md#dense-vector-params) chosen for the model. To find similar embeddings in {{es}} use the efficient [Approximate k-nearest neighbor (kNN)](../../../solutions/search/vector/knn.md#approximate-knn) search API with a text embedding as the query vector. Approximate kNN search uses the similarity function defined in the dense vector field mapping is used to calculate the relevance. For the best results the function must be one of the suitable similarity functions for the model. diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-ner-example.md b/explore-analyze/machine-learning/nlp/ml-nlp-ner-example.md index 36625fd7ba..444aae80bb 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-ner-example.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-ner-example.md @@ -113,7 +113,7 @@ ## Add the NER model to an {{infer}} ingest pipeline [ex-ner-ingest] -You can perform bulk {{infer}} on documents as they are ingested by using an [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-processor.html) in your ingest pipeline. The novel *Les Misérables* by Victor Hugo is used as an example for {{infer}} in the following example. [Download](https://github.com/elastic/stack-docs/blob/8.5/docs/en/stack/ml/nlp/data/les-miserables-nd.json) the novel text split by paragraph as a JSON file, then upload it by using the [Data Visualizer](../../../manage-data/ingest/tools/upload-data-files.md). Give the new index the name `les-miserables` when uploading the file. +You can perform bulk {{infer}} on documents as they are ingested by using an [{{infer}} processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/inference-processor.md) in your ingest pipeline. The novel *Les Misérables* by Victor Hugo is used for {{infer}} in the following example. [Download](https://github.com/elastic/stack-docs/blob/8.5/docs/en/stack/ml/nlp/data/les-miserables-nd.json) the novel text split by paragraph as a JSON file, then upload it by using the [Data Visualizer](../../../manage-data/ingest/tools/upload-data-files.md). Give the new index the name `les-miserables` when uploading the file.
Now create an ingest pipeline either in the [Stack management UI](ml-nlp-inference.md#ml-nlp-inference-processor) or by using the API: diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-overview.md b/explore-analyze/machine-learning/nlp/ml-nlp-overview.md index 0e029e865b..bbc0adc396 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-overview.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-overview.md @@ -20,7 +20,7 @@ The [{{infer}} API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endp You can **upload and manage NLP models** using the Eland client and the [{{stack}}](ml-nlp-deploy-models.md). Find the [list of recommended and compatible models here](ml-nlp-model-ref.md). Refer to [*Examples*](ml-nlp-examples.md) to learn more about how to use {{ml}} models deployed in your cluster. -You can **store embeddings in your {{es}} vector database** if you generate [dense vector](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html) or [sparse vector](https://www.elastic.co/guide/en/elasticsearch/reference/current/sparse-vector.html) model embeddings outside of {{es}}. +You can **store embeddings in your {{es}} vector database** if you generate [dense vector](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md) or [sparse vector](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/sparse-vector.md) model embeddings outside of {{es}}. ## What is NLP? [what-is-nlp] diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-rerank.md b/explore-analyze/machine-learning/nlp/ml-nlp-rerank.md index 85897f7b65..e92b74bd40 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-rerank.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-rerank.md @@ -166,7 +166,7 @@ For a file-based access, follow these steps: * English language only * Maximum context window of 512 tokens - When using the [`semantic_text` field type](https://www.elastic.co/guide/en/elasticsearch/reference/current/semantic-text.html), text is divided into chunks. By default, each chunk contains 250 words (approximately 400 tokens). Be cautious when increasing the chunk size - if the combined length of your query and chunk text exceeds 512 tokens, the model won’t have access to the full content. + When using the [`semantic_text` field type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/semantic-text.md), text is divided into chunks. By default, each chunk contains 250 words (approximately 400 tokens). Be cautious when increasing the chunk size - if the combined length of your query and chunk text exceeds 512 tokens, the model won’t have access to the full content. When the combined inputs exceed the 512 token limit, a balanced truncation strategy is used. If both the query and input text are longer than 255 tokens each then both are truncated, otherwise the longest is truncated. diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-search-compare.md b/explore-analyze/machine-learning/nlp/ml-nlp-search-compare.md index 60f7e6b23c..91b641aa2f 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-search-compare.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-search-compare.md @@ -17,7 +17,7 @@ The {{stack-ml-features}} can generate embeddings, which you can use to search i Text embedding is a task which produces a mathematical representation of text called an embedding. The {{ml}} model turns the text into an array of numerical values (also known as a *vector*). 
Pieces of content with similar meaning have similar representations. This means it is possible to determine whether different pieces of text are either semantically similar, different, or even opposite by using a mathematical similarity function. -This task is responsible for producing only the embedding. When the embedding is created, it can be stored in a [dense_vector](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html) field and used at search time. For example, you can use these vectors in a [k-nearest neighbor (kNN) search](../../../solutions/search/vector/knn.md) to achieve semantic search capabilities. +This task is responsible for producing only the embedding. When the embedding is created, it can be stored in a [dense_vector](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md) field and used at search time. For example, you can use these vectors in a [k-nearest neighbor (kNN) search](../../../solutions/search/vector/knn.md) to achieve semantic search capabilities. The following is an example of producing a text embedding: diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-text-emb-vector-search-example.md b/explore-analyze/machine-learning/nlp/ml-nlp-text-emb-vector-search-example.md index bed59cf75c..eecde0acd5 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-text-emb-vector-search-example.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-text-emb-vector-search-example.md @@ -112,7 +112,7 @@ Upload the file by using the [Data Visualizer](../../../manage-data/ingest/tools ## Add the text embedding model to an {{infer}} ingest pipeline [ex-text-emb-ingest] -Process the initial data with an [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-processor.html). It adds an embedding for each passage. For this, create a text embedding ingest pipeline and then reindex the initial data with this pipeline. +Process the initial data with an [{{infer}} processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/inference-processor.md). It adds an embedding for each passage. For this, create a text embedding ingest pipeline and then reindex the initial data with this pipeline. 
Now create an ingest pipeline either in the [{{stack-manage-app}} UI](ml-nlp-inference.md#ml-nlp-inference-processor) or by using the API: diff --git a/explore-analyze/machine-learning/setting-up-machine-learning.md b/explore-analyze/machine-learning/setting-up-machine-learning.md index e86708a745..6402734f42 100644 --- a/explore-analyze/machine-learning/setting-up-machine-learning.md +++ b/explore-analyze/machine-learning/setting-up-machine-learning.md @@ -14,8 +14,8 @@ mapped_pages: To use the {{stack}} {{ml-features}}, you must have: * the [appropriate subscription](https://www.elastic.co/subscriptions) level or the free trial period activated -* `xpack.ml.enabled` set to its default value of `true` on every node in the cluster (refer to [{{ml-cap}} settings in {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html)) -* `ml` value defined in the list of `node.roles` on the [{{ml}} nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#ml-node) +* `xpack.ml.enabled` set to its default value of `true` on every node in the cluster (refer to [{{ml-cap}} settings in {{es}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md)) +* `ml` value defined in the list of `node.roles` on the [{{ml}} nodes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#ml-node) * {{ml}} features visible in the {{kib}} space * security privileges assigned to the user that: diff --git a/explore-analyze/numeral-formatting.md b/explore-analyze/numeral-formatting.md index f8b661f9ed..17c21e14a5 100644 --- a/explore-analyze/numeral-formatting.md +++ b/explore-analyze/numeral-formatting.md @@ -12,7 +12,7 @@ Numeral formatting in {{kib}} is done through a pattern-based syntax. These patt Numeral formatting patterns are used in multiple places in {{kib}}, including: -* [Advanced settings](https://www.elastic.co/guide/en/kibana/current/advanced-options.html) +* [Advanced settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md) * [Data view formatters](find-and-organize/data-views.md#field-formatters-numeric) * [**TSVB**](visualize/legacy-editors/tsvb.md) * [**Canvas**](visualize/canvas.md) diff --git a/explore-analyze/query-filter/aggregations.md b/explore-analyze/query-filter/aggregations.md index c2905f81ef..b2e8725b6f 100644 --- a/explore-analyze/query-filter/aggregations.md +++ b/explore-analyze/query-filter/aggregations.md @@ -17,13 +17,13 @@ An aggregation summarizes your data as metrics, statistics, or other analytics. {{es}} organizes aggregations into three categories: -* [Metric](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) aggregations that calculate metrics, such as a sum or average, from field values. -* [Bucket](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html) aggregations that group documents into buckets, also called bins, based on field values, ranges, or other criteria. -* [Pipeline](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline.html) aggregations that take input from other aggregations instead of documents or fields. +* [Metric](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/metrics.md) aggregations that calculate metrics, such as a sum or average, from field values. 
+* [Bucket](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/bucket.md) aggregations that group documents into buckets, also called bins, based on field values, ranges, or other criteria. +* [Pipeline](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/pipeline.md) aggregations that take input from other aggregations instead of documents or fields. ## Run an aggregation [run-an-agg] -You can run aggregations as part of a [search](../../solutions/search/querying-for-search.md) by specifying the [search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search)'s `aggs` parameter. The following search runs a [terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) on `my-field`: +You can run aggregations as part of a [search](../../solutions/search/querying-for-search.md) by specifying the [search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search)'s `aggs` parameter. The following search runs a [terms aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) on `my-field`: ```console GET /my-index-000001/_search @@ -137,7 +137,7 @@ GET /my-index-000001/_search ## Run sub-aggregations [run-sub-aggs] -Bucket aggregations support bucket or metric sub-aggregations. For example, a terms aggregation with an [avg](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html) sub-aggregation calculates an average value for each bucket of documents. There is no level or depth limit for nesting sub-aggregations. +Bucket aggregations support bucket or metric sub-aggregations. For example, a terms aggregation with an [avg](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-avg-aggregation.md) sub-aggregation calculates an average value for each bucket of documents. There is no level or depth limit for nesting sub-aggregations. ```console GET /my-index-000001/_search @@ -244,7 +244,7 @@ GET /my-index-000001/_search?typed_keys The response returns the aggregation type as a prefix to the aggregation’s name. ::::{important} -Some aggregations return a different aggregation type from the type in the request. For example, the terms, [significant terms](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html), and [percentiles](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html) aggregations return different aggregations types depending on the data type of the aggregated field. +Some aggregations return a different aggregation type from the type in the request. For example, the terms, [significant terms](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-significantterms-aggregation.md), and [percentiles](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-percentile-aggregation.md) aggregations return different aggregations types depending on the data type of the aggregated field. :::: ```console-result @@ -284,14 +284,14 @@ GET /my-index-000001/_search?size=0 } ``` -Scripts calculate field values dynamically, which adds a little overhead to the aggregation. 
In addition to the time spent calculating, some aggregations like [`terms`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) and [`filters`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html) can’t use some of their optimizations with runtime fields. In total, performance costs for using a runtime field varies from aggregation to aggregation. +Scripts calculate field values dynamically, which adds a little overhead to the aggregation. In addition to the time spent calculating, some aggregations like [`terms`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) and [`filters`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-filters-aggregation.md) can’t use some of their optimizations with runtime fields. In total, performance costs for using a runtime field vary from aggregation to aggregation. ## Aggregation caches [agg-caches] -For faster responses, {{es}} caches the results of frequently run aggregations in the [shard request cache](https://www.elastic.co/guide/en/elasticsearch/reference/current/shard-request-cache.html). To get cached results, use the same [`preference` string](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shard-routing.html#shard-and-node-preference) for each search. If you don’t need search hits, [set `size` to `0`](#return-only-agg-results) to avoid filling the cache. +For faster responses, {{es}} caches the results of frequently run aggregations in the [shard request cache](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/shard-request-cache-settings.md). To get cached results, use the same [`preference` string](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/search-shard-routing.md#shard-and-node-preference) for each search. If you don’t need search hits, [set `size` to `0`](#return-only-agg-results) to avoid filling the cache. {{es}} routes searches with the same preference string to the same shards. If the shards' data doesn’t change between searches, the shards return cached aggregation results. ## Limits for `long` values [limits-for-long-values] -When running aggregations, {{es}} uses [`double`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) values to hold and represent numeric data. As a result, aggregations on [`long`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) numbers greater than `253` are approximate. +When running aggregations, {{es}} uses [`double`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) values to hold and represent numeric data. As a result, aggregations on [`long`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) numbers greater than `2^53` are approximate.
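The caching guidance in the hunk above combines two pieces: a stable `preference` string and `size: 0`. A minimal sketch that reuses the page's example index, with a hypothetical preference value:

```console
GET /my-index-000001/_search?preference=my-dashboard
{
  "size": 0, <1>
  "aggs": {
    "my-agg-name": {
      "terms": {
        "field": "my-field"
      }
    }
  }
}
```

1. Returns no search hits, so the request computes, and can cache, only the aggregation results.

Repeating this request with the same `preference` value routes it to the same shards; if the shard data has not changed between searches, the response can come from the shard request cache instead of being recomputed.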
diff --git a/explore-analyze/query-filter/aggregations/tutorial-analyze-ecommerce-data-with-aggregations-using-query-dsl.md b/explore-analyze/query-filter/aggregations/tutorial-analyze-ecommerce-data-with-aggregations-using-query-dsl.md index fc66dc8a90..6063f08114 100644 --- a/explore-analyze/query-filter/aggregations/tutorial-analyze-ecommerce-data-with-aggregations-using-query-dsl.md +++ b/explore-analyze/query-filter/aggregations/tutorial-analyze-ecommerce-data-with-aggregations-using-query-dsl.md @@ -268,19 +268,19 @@ The response shows the field mappings for the `kibana_sample_data_ecommerce` ind :::: -The sample data includes the following [field data types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html): +The sample data includes the following [field data types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md): -* [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) and [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) for text fields - * Most `text` fields have a `.keyword` subfield for exact matching using [multi-fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html) +* [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) and [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) for text fields + * Most `text` fields have a `.keyword` subfield for exact matching using [multi-fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md) -* [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) for date fields -* 3 [numeric](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) types: +* [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) for date fields +* 3 [numeric](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) types: * `integer` for whole numbers * `long` for large whole numbers * `half_float` for floating-point numbers -* [`geo_point`](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) for geographic coordinates -* [`object`](https://www.elastic.co/guide/en/elasticsearch/reference/current/object.html) for nested structures such as `products`, `geoip`, `event` +* [`geo_point`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) for geographic coordinates +* [`object`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/object.md) for nested structures such as `products`, `geoip`, `event` Now that we understand the structure of our sample data, let’s start analyzing it. @@ -290,7 +290,7 @@ Let’s start by calculating important metrics about orders and customers. ### Get average order size [aggregations-tutorial-order-value] -Calculate the average order value across all orders in the dataset using the [`avg`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html) aggregation. +Calculate the average order value across all orders in the dataset using the [`avg`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-avg-aggregation.md) aggregation. 
```console GET kibana_sample_data_ecommerce/_search @@ -347,7 +347,7 @@ GET kibana_sample_data_ecommerce/_search ### Get multiple order statistics at once [aggregations-tutorial-order-stats] -Calculate multiple statistics about orders in one request using the [`stats`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html) aggregation. +Calculate multiple statistics about orders in one request using the [`stats`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-stats-aggregation.md) aggregation. ```console GET kibana_sample_data_ecommerce/_search @@ -401,7 +401,7 @@ Let’s group orders in different ways to understand sales patterns. ### Break down sales by category [aggregations-tutorial-category-breakdown] -Group orders by category to see which product categories are most popular, using the [`terms`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) aggregation. +Group orders by category to see which product categories are most popular, using the [`terms`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) aggregation. ```console GET kibana_sample_data_ecommerce/_search @@ -486,7 +486,7 @@ GET kibana_sample_data_ecommerce/_search ### Track daily sales patterns [aggregations-tutorial-daily-sales] -Group orders by day to track daily sales patterns using the [`date_histogram`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html) aggregation. +Group orders by day to track daily sales patterns using the [`date_histogram`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-datehistogram-aggregation.md) aggregation. ```console GET kibana_sample_data_ecommerce/_search @@ -508,7 +508,7 @@ GET kibana_sample_data_ecommerce/_search 1. Descriptive name for the time-series aggregation results. 2. The `date_histogram` aggregation groups documents into time-based buckets, similar to terms aggregation but for dates. 3. Uses [calendar and fixed time intervals](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html#calendar_and_fixed_intervals) to handle months with different lengths. `"day"` ensures consistent daily grouping regardless of timezone. -4. Formats dates in response using [date patterns](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html) (e.g. "yyyy-MM-dd"). Refer to [date math expressions](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#date-math) for additional options. +4. Formats dates in response using [date patterns](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-date-format.md) (e.g. "yyyy-MM-dd"). Refer to [date math expressions](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/common-options.md#date-math) for additional options. 5. When `min_doc_count` is 0, returns buckets for days with no orders, useful for continuous time series visualization. 
::::{dropdown} Example response @@ -705,7 +705,7 @@ GET kibana_sample_data_ecommerce/_search ## Combine metrics with groupings [aggregations-tutorial-combined-analysis] -Now let’s calculate [metrics](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) within each group to get deeper insights. +Now let’s calculate [metrics](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/metrics.md) within each group to get deeper insights. ### Compare category performance [aggregations-tutorial-category-metrics] @@ -827,7 +827,7 @@ GET kibana_sample_data_ecommerce/_search ``` 1. Daily revenue -2. Uses the [`cardinality`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html) aggregation to count unique customers per day +2. Uses the [`cardinality`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-cardinality-aggregation.md) aggregation to count unique customers per day 3. Average number of items per order ::::{dropdown} Example response @@ -1297,11 +1297,11 @@ GET kibana_sample_data_ecommerce/_search ## Track trends and patterns [aggregations-tutorial-trends] -You can use [pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline.html) on the results of other aggregations. Let’s analyze how metrics change over time. +You can use [pipeline aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/pipeline.md) on the results of other aggregations. Let’s analyze how metrics change over time. ### Smooth out daily fluctuations [aggregations-tutorial-moving-average] -Moving averages help identify trends by reducing day-to-day noise in the data. Let’s observe sales trends more clearly by smoothing daily revenue variations, using the [Moving Function](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movfn-aggregation.html) aggregation. +Moving averages help identify trends by reducing day-to-day noise in the data. Let’s observe sales trends more clearly by smoothing daily revenue variations, using the [Moving Function](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-movfn-aggregation.md) aggregation. ```console GET kibana_sample_data_ecommerce/_search @@ -1724,7 +1724,7 @@ Notice how the smoothed values lag behind the actual values - this is because th ### Track running totals [aggregations-tutorial-cumulative] -Track running totals over time using the [`cumulative_sum`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html) aggregation. +Track running totals over time using the [`cumulative_sum`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-cumulative-sum-aggregation.md) aggregation. ```console GET kibana_sample_data_ecommerce/_search diff --git a/explore-analyze/query-filter/filtering.md b/explore-analyze/query-filter/filtering.md index d8583c60f6..74352aaa10 100644 --- a/explore-analyze/query-filter/filtering.md +++ b/explore-analyze/query-filter/filtering.md @@ -26,7 +26,7 @@ Some apps provide more options, such as [Dashboards](../dashboards.md). 
## Time filter [set-time-filter] -Display data within a specified time range when your index contains time-based events, and a time-field is configured for the selected [{{data-source}}](../find-and-organize/data-views.md). The default time range is 15 minutes, but you can customize it in [Advanced Settings](https://www.elastic.co/guide/en/kibana/current/advanced-options.html). +Display data within a specified time range when your index contains time-based events, and a time-field is configured for the selected [{{data-source}}](../find-and-organize/data-views.md). The default time range is 15 minutes, but you can customize it in [Advanced Settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md). 1. Click ![calendar icon](../../images/kibana-time-filter-icon.png). 2. Choose one of the following: diff --git a/explore-analyze/query-filter/languages/eql.md b/explore-analyze/query-filter/languages/eql.md index 218c26252a..3844247cf5 100644 --- a/explore-analyze/query-filter/languages/eql.md +++ b/explore-analyze/query-filter/languages/eql.md @@ -18,7 +18,7 @@ Event Query Language (EQL) is a query language for event-based time series data, ## Advantages of EQL [eql-advantages] * **EQL lets you express relationships between events.**
Many query languages allow you to match single events. EQL lets you match a sequence of events across different event categories and time spans. -* **EQL has a low learning curve.**
[EQL syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html) looks like other common query languages, such as SQL. EQL lets you write and read queries intuitively, which makes for quick, iterative searching. +* **EQL has a low learning curve.**
[EQL syntax](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/eql-syntax.md) looks like other common query languages, such as SQL. EQL lets you write and read queries intuitively, which makes for quick, iterative searching. * **EQL is designed for security use cases.**
While you can use it for any event-based data, we created EQL for threat hunting. EQL not only supports indicator of compromise (IOC) searches but can describe activity that goes beyond IOCs. @@ -871,7 +871,7 @@ GET /my-index*/_eql/search By default, each hit in the search response includes the document `_source`, which is the entire JSON object that was provided when indexing the document. -You can use the [`filter_path`](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#common-options-response-filtering) query parameter to filter the API response. For example, the following search returns only the timestamp and PID from the `_source` of each matching event. +You can use the [`filter_path`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/common-options.md#common-options-response-filtering) query parameter to filter the API response. For example, the following search returns only the timestamp and PID from the `_source` of each matching event. ```console GET /my-data-stream/_eql/search?filter_path=hits.events._source.@timestamp,hits.events._source.process.pid @@ -909,12 +909,12 @@ The API returns the following response. } ``` -You can also use the `fields` parameter to retrieve and format specific fields in the response. This field is identical to the search API’s [`fields` parameter](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html). +You can also use the `fields` parameter to retrieve and format specific fields in the response. This field is identical to the search API’s [`fields` parameter](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md). Because it consults the index mappings, the `fields` parameter provides several advantages over referencing the `_source` directly. Specifically, the `fields` parameter: * Returns each value in a standardized way that matches its mapping type -* Accepts [multi-fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html) and [field aliases](https://www.elastic.co/guide/en/elasticsearch/reference/current/field-alias.html) +* Accepts [multi-fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md) and [field aliases](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-alias.md) * Formats dates and spatial data types * Retrieves [runtime field values](../../../manage-data/data-store/mapping/retrieve-runtime-field.md) * Returns fields calculated by a script at index time @@ -1055,7 +1055,7 @@ GET /my-data-stream/_eql/search } ``` -The event category field must be mapped as a [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) family field type. The timestamp field should be mapped as a [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) field type. [`date_nanos`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html) timestamp fields are not supported. You cannot use a [`nested`](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) field or the sub-fields of a `nested` field as the timestamp or event category field. +The event category field must be mapped as a [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) family field type. 
The timestamp field should be mapped as a [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) field type. [`date_nanos`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date_nanos.md) timestamp fields are not supported. You cannot use a [`nested`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/nested.md) field or the sub-fields of a `nested` field as the timestamp or event category field. ## Specify a sort tiebreaker [eql-search-specify-a-sort-tiebreaker] @@ -1286,5 +1286,5 @@ GET /cluster_one:my-data-stream,cluster_two:my-data-stream/_eql/search ## EQL circuit breaker settings [eql-circuit-breaker] -The relevant circuit breaker settings can be found in the [Circuit Breakers page](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html#circuit-breakers-page-eql). +The relevant circuit breaker settings can be found in the [Circuit Breakers page](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/circuit-breaker-settings.md#circuit-breakers-page-eql). diff --git a/explore-analyze/query-filter/languages/esql-cross-clusters.md b/explore-analyze/query-filter/languages/esql-cross-clusters.md index 69b1842b0d..a0d6011ca2 100644 --- a/explore-analyze/query-filter/languages/esql-cross-clusters.md +++ b/explore-analyze/query-filter/languages/esql-cross-clusters.md @@ -22,13 +22,13 @@ With {{esql}}, you can execute a single query across multiple clusters. ## Prerequisites [esql-ccs-prerequisites] -* {{ccs-cap}} requires remote clusters. To set up remote clusters on {{ess}}, see [configure remote clusters on {{ess}}](https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html). If you run {{es}} on your own hardware, see [*Remote clusters*](../../../deploy-manage/remote-clusters.md). +* {{ccs-cap}} requires remote clusters. To set up remote clusters on {{ess}}, see [configure remote clusters on {{ess}}](/deploy-manage/remote-clusters/ec-enable-ccs.md). If you run {{es}} on your own hardware, see [*Remote clusters*](../../../deploy-manage/remote-clusters.md). To ensure your remote cluster configuration supports {{ccs}}, see [Supported {{ccs}} configurations](../../../solutions/search/cross-cluster-search.md#ccs-supported-configurations). * For full {{ccs}} capabilities, the local and remote cluster must be on the same [subscription level](https://www.elastic.co/subscriptions). * The local coordinating node must have the [`remote_cluster_client`](../../../deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md#remote-node) node role. -* If you use [sniff mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/remote-clusters.html#sniff-mode), the local coordinating node must be able to connect to seed and gateway nodes on the remote cluster. +* If you use [sniff mode](/deploy-manage/remote-clusters/remote-clusters-self-managed.md#sniff-mode), the local coordinating node must be able to connect to seed and gateway nodes on the remote cluster. We recommend using gateway nodes capable of serving as coordinating nodes. The seed nodes can be a subset of these gateway nodes. @@ -361,7 +361,7 @@ Which returns: ## Enrich across clusters [ccq-enrich] -Enrich in {{esql}} across clusters operates similarly to [local enrich](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-enrich). 
If the enrich policy and its enrich indices are consistent across all clusters, simply write the enrich command as you would without remote clusters. In this default mode, {{esql}} can execute the enrich command on either the local cluster or the remote clusters, aiming to minimize computation or inter-cluster data transfer. Ensuring that the policy exists with consistent data on both the local cluster and the remote clusters is critical for ES|QL to produce a consistent query result.
+Enrich in {{esql}} across clusters operates similarly to [local enrich](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-commands.md#esql-enrich). If the enrich policy and its enrich indices are consistent across all clusters, simply write the enrich command as you would without remote clusters. In this default mode, {{esql}} can execute the enrich command on either the local cluster or the remote clusters, aiming to minimize computation or inter-cluster data transfer. Ensuring that the policy exists with consistent data on both the local cluster and the remote clusters is critical for ES|QL to produce a consistent query result.

::::{tip}
Enrich in {{esql}} across clusters using the API key based security model was introduced in version **8.15.0**. Cross cluster API keys created in versions prior to 8.15.0 will need to be replaced or updated to use the new required permissions. Refer to the example in the [API key authentication](#esql-ccs-security-model-api-key) section.

diff --git a/explore-analyze/query-filter/languages/esql-getting-started.md b/explore-analyze/query-filter/languages/esql-getting-started.md
index 3e9a97fd72..8c40b1a34d 100644
--- a/explore-analyze/query-filter/languages/esql-getting-started.md
+++ b/explore-analyze/query-filter/languages/esql-getting-started.md
@@ -117,7 +117,7 @@ You can adjust the editor’s height by dragging its bottom border to your likin

## Your first {{esql}} query [esql-getting-started-first-query]

-Each {{esql}} query starts with a [source command](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-source-commands). A source command produces a table, typically with data from {{es}}.
+Each {{esql}} query starts with a [source command](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-commands.md#esql-source-commands). A source command produces a table, typically with data from {{es}}.

:::{image} ../../../images/elasticsearch-reference-source-command.svg
:alt: A source command producing a table from {{es}}
@@ -191,7 +191,7 @@ FROM sample_data
| WHERE event_duration > 5000000
```

-`WHERE` supports several [operators](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-operators). For example, you can use [`LIKE`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-like-operator) to run a wildcard query against the `message` column:
+`WHERE` supports several [operators](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-functions-operators.md#esql-operators). For example, you can use [`LIKE`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-functions-operators.md#esql-like-operator) to run a wildcard query against the `message` column:

```esql
FROM sample_data
@@ -269,7 +269,7 @@ FROM sample_data

## Access columns [esql-getting-started-access-columns]

-You can access columns by their name.
If a name contains special characters, [it needs to be quoted](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-syntax.html#esql-identifiers) with backticks (```). +You can access columns by their name. If a name contains special characters, [it needs to be quoted](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-syntax.md#esql-identifiers) with backticks (```). Assigning an explicit name to a column created by `EVAL` or `STATS` is optional. If you don’t provide a name, the new column name is equal to the function expression. For example: @@ -309,7 +309,7 @@ FROM sample_data ## Enrich data [esql-getting-started-enrich] -{{esql}} enables you to [enrich](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-enrich-data.html) a table with data from indices in {{es}}, using the [`ENRICH`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-enrich) command. +{{esql}} enables you to [enrich](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-enrich-data.md) a table with data from indices in {{es}}, using the [`ENRICH`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-enrich) command. :::{image} ../../../images/elasticsearch-reference-esql-enrich.png :alt: esql enrich @@ -391,7 +391,7 @@ For more about data enrichment with {{esql}}, refer to [Data enrichment](https:/ ## Process data [esql-getting-started-process-data] -Your data may contain unstructured strings that you want to [structure](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-process-data-with-dissect-and-grok.html) to make it easier to analyze the data. For example, the sample data contains log messages like: +Your data may contain unstructured strings that you want to [structure](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-process-data-with-dissect-grok.md) to make it easier to analyze the data. For example, the sample data contains log messages like: ```txt "Connected to 10.1.0.3" @@ -424,5 +424,5 @@ For more about data processing with {{esql}}, refer to [Data processing with DIS ## Learn more [esql-getting-learn-more] -To learn more about {{esql}}, refer to [{{esql}} reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-language.html). +To learn more about {{esql}}, refer to [{{esql}} reference](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql.md). diff --git a/explore-analyze/query-filter/languages/esql-kibana.md b/explore-analyze/query-filter/languages/esql-kibana.md index 6ddbd2c1a7..615ce5dc00 100644 --- a/explore-analyze/query-filter/languages/esql-kibana.md +++ b/explore-analyze/query-filter/languages/esql-kibana.md @@ -21,7 +21,7 @@ This guide shows you how to use {{esql}} in Kibana. To follow along with the que ## Enable or disable {{esql}} [esql-kibana-enable] -{{esql}} is enabled by default in {{kib}}. It can be disabled using the `enableESQL` setting from the [Advanced Settings](https://www.elastic.co/guide/en/kibana/current/advanced-options.html). +{{esql}} is enabled by default in {{kib}}. It can be disabled using the `enableESQL` setting from the [Advanced Settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md). This will hide the {{esql}} user interface from various applications. However, users will be able to access existing {{esql}} artifacts like saved searches and visualizations. 
@@ -39,7 +39,7 @@ After switching to {{esql}} mode, the query bar shows a sample query. For exampl from kibana_sample_data_logs | limit 10 ``` -Every query starts with a [source command](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html). In this query, the source command is [`FROM`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-from). `FROM` retrieves data from data streams, indices, or aliases. In this example, the data is retrieved from `kibana_sample_data_logs`. +Every query starts with a [source command](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-commands.md). In this query, the source command is [`FROM`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-from). `FROM` retrieves data from data streams, indices, or aliases. In this example, the data is retrieved from `kibana_sample_data_logs`. A source command can be followed by one or more [processing commands](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html). In this query, the processing command is [`LIMIT`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-limit). `LIMIT` limits the number of rows that are retrieved. @@ -179,7 +179,7 @@ FROM my_index | WHERE custom_timestamp >= ?_tstart AND custom_timestamp < ?_tend ``` -You can also use the `?_tstart` and `?_tend` parameters with the [`BUCKET`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-bucket) function to create auto-incrementing time buckets in {{esql}} [visualizations](#esql-kibana-visualizations). For example: +You can also use the `?_tstart` and `?_tend` parameters with the [`BUCKET`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-functions-operators.md#esql-bucket) function to create auto-incrementing time buckets in {{esql}} [visualizations](#esql-kibana-visualizations). For example: ```esql FROM kibana_sample_data_logs @@ -254,7 +254,7 @@ You can also edit the {{esql}} visualization from here. Click the options button ## Create an enrich policy [esql-kibana-enrich] -The {{esql}} [`ENRICH`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-enrich) command enables you to [enrich](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-enrich-data.html) your query dataset with fields from another dataset. Before you can use `ENRICH`, you need to [create and execute an enrich policy](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-enrich-data.html#esql-set-up-enrich-policy). If a policy exists, it will be suggested by auto-complete. If not, click **Click to create** to create one. +The {{esql}} [`ENRICH`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-enrich) command enables you to [enrich](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-enrich-data.md) your query dataset with fields from another dataset. Before you can use `ENRICH`, you need to [create and execute an enrich policy](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-enrich-data.html#esql-set-up-enrich-policy). If a policy exists, it will be suggested by auto-complete. If not, click **Click to create** to create one. 
:::{image} ../../../images/elasticsearch-reference-esql-kibana-enrich-autocomplete.png :alt: esql kibana enrich autocomplete diff --git a/explore-analyze/query-filter/languages/esql-multi-index.md b/explore-analyze/query-filter/languages/esql-multi-index.md index 39be1b8c8c..e11652c7cf 100644 --- a/explore-analyze/query-filter/languages/esql-multi-index.md +++ b/explore-analyze/query-filter/languages/esql-multi-index.md @@ -113,7 +113,7 @@ This functionality is in technical preview and may be changed or removed in a fu :::: -{{esql}} has a way to handle [field type mismatches](#esql-multi-index-invalid-mapping). When the same field is mapped to multiple types in multiple indices, the type of the field is understood to be a *union* of the various types in the index mappings. As seen in the preceding examples, this *union type* cannot be used in the results, and cannot be referred to by the query — except in `KEEP`, `DROP` or when it’s passed to a type conversion function that accepts all the types in the *union* and converts the field to a single type. {{esql}} offers a suite of [type conversion functions](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-type-conversion-functions) to achieve this. +{{esql}} has a way to handle [field type mismatches](#esql-multi-index-invalid-mapping). When the same field is mapped to multiple types in multiple indices, the type of the field is understood to be a *union* of the various types in the index mappings. As seen in the preceding examples, this *union type* cannot be used in the results, and cannot be referred to by the query — except in `KEEP`, `DROP` or when it’s passed to a type conversion function that accepts all the types in the *union* and converts the field to a single type. {{esql}} offers a suite of [type conversion functions](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-functions-operators.md#esql-type-conversion-functions) to achieve this. In the above examples, the query can use a command like `EVAL client_ip = TO_IP(client_ip)` to resolve the union of `ip` and `keyword` to just `ip`. You can also use the type-conversion syntax `EVAL client_ip = client_ip::IP`. Alternatively, the query could use [`TO_STRING`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html#esql-to_string) to convert all supported types into `KEYWORD`. @@ -139,7 +139,7 @@ FROM events_* ## Index metadata [esql-multi-index-index-metadata] -It can be helpful to know the particular index from which each row is sourced. To get this information, use the [`METADATA`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-metadata-fields.html) option on the [`FROM`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-from) command. +It can be helpful to know the particular index from which each row is sourced. To get this information, use the [`METADATA`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-metadata-fields.md) option on the [`FROM`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-commands.md#esql-from) command. 
```esql FROM events_* METADATA _index diff --git a/explore-analyze/query-filter/languages/esql.md b/explore-analyze/query-filter/languages/esql.md index dd3da84f76..ca57f9b1bc 100644 --- a/explore-analyze/query-filter/languages/esql.md +++ b/explore-analyze/query-filter/languages/esql.md @@ -28,7 +28,7 @@ mapped_urls: **Elasticsearch Query Language ({{esql}})** is a piped query language for filtering, transforming, and analyzing data. -You can author {{esql}} queries to find specific events, perform statistical analysis, and generate visualizations. It supports a wide range of [commands, functions and operators](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html) to perform various data operations, such as filtering, aggregation, time-series analysis, and more. Today, it supports a subset of the features available in Query DSL, but it is rapidly evolving. +You can author {{esql}} queries to find specific events, perform statistical analysis, and generate visualizations. It supports a wide range of [commands, functions and operators](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-functions-operators.md) to perform various data operations, such as filtering, aggregation, time-series analysis, and more. Today, it supports a subset of the features available in Query DSL, but it is rapidly evolving. ::::{note} **{{esql}}'s compute architecture** @@ -52,10 +52,10 @@ You can use it: ## Next steps Find more details about {{esql}} in the following documentation pages: -- [{{esql}} reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-language.html): - - Reference documentation for the [{{esql}} syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-syntax.html), [commands](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html), and [functions and operators](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html). - - Information about working with [metadata fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-metadata-fields.html) and [multivalued fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-multivalued-fields.html). - - Guidance for [data processing with DISSECT and GROK](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-process-data-with-dissect-and-grok.html) and [data enrichment with ENRICH](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-enrich-data.html). +- [{{esql}} reference](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql.md): + - Reference documentation for the [{{esql}} syntax](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-syntax.md), [commands](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-commands.md), and [functions and operators](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-functions-operators.html). + - Information about working with [metadata fields](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-metadata-fields.md) and [multivalued fields](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-multivalued-fields.md). 
+ - Guidance for [data processing with DISSECT and GROK](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-process-data-with-dissect-grok.md) and [data enrichment with ENRICH](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-enrich-data.md). - Using {{esql}}: - An overview of using the [`_query` API endpoint](/explore-analyze/query-filter/languages/esql-rest.md). @@ -64,7 +64,7 @@ Find more details about {{esql}} in the following documentation pages: - [Using {{esql}} across clusters](/explore-analyze/query-filter/languages/esql-cross-clusters.md). - [Task management](/explore-analyze/query-filter/languages/esql-task-management.md). -- [Limitations](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-limitations.html): The current limitations of {{esql}}. +- [Limitations](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/limitations.md): The current limitations of {{esql}}. - [Examples](/explore-analyze/query-filter/languages/esql.md): A few examples of what you can do with {{esql}}. diff --git a/explore-analyze/query-filter/languages/example-detect-threats-with-eql.md b/explore-analyze/query-filter/languages/example-detect-threats-with-eql.md index 3cb6149265..3a4c3bb0ea 100644 --- a/explore-analyze/query-filter/languages/example-detect-threats-with-eql.md +++ b/explore-analyze/query-filter/languages/example-detect-threats-with-eql.md @@ -207,7 +207,7 @@ The query matches an event, confirming `scrobj.dll` was loaded. ## Determine the likelihood of success [eql-ex-detemine-likelihood-of-success] -In many cases, attackers use malicious scripts to connect to remote servers or download other files. Use an [EQL sequence query](https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html#eql-sequences) to check for the following series of events: +In many cases, attackers use malicious scripts to connect to remote servers or download other files. Use an [EQL sequence query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/eql-syntax.md#eql-sequences) to check for the following series of events: 1. A `regsvr32.exe` process 2. A load of the `scrobj.dll` library by the same process diff --git a/explore-analyze/query-filter/languages/kql.md b/explore-analyze/query-filter/languages/kql.md index 987ea41def..15fff1786a 100644 --- a/explore-analyze/query-filter/languages/kql.md +++ b/explore-analyze/query-filter/languages/kql.md @@ -101,7 +101,7 @@ You can also use range syntax for string values, IP addresses, and timestamps. F @timestamp < now-2w ``` -For more examples on acceptable date formats, refer to [Date Math](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#date-math). +For more examples on acceptable date formats, refer to [Date Math](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/common-options.md#date-math). ## Filter for documents using wildcards [_filter_for_documents_using_wildcards] @@ -112,7 +112,7 @@ To search for documents matching a pattern, use the wildcard syntax. For example http.response.status_code: 4* ``` -By default, leading wildcards are not allowed for performance reasons. You can modify this with the [`query:allowLeadingWildcards`](https://www.elastic.co/guide/en/kibana/current/advanced-options.html#query-allowleadingwildcards) advanced setting. +By default, leading wildcards are not allowed for performance reasons. 
You can modify this with the [`query:allowLeadingWildcards`](https://www.elastic.co/guide/en/kibana/current/advanced-options.html#query-allowleadingwildcards) advanced setting.
+By default, leading wildcards are not allowed for performance reasons. You can modify this with the [`query:allowLeadingWildcards`](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md#query-allowleadingwildcards) advanced setting.

::::{note}
Only `*` is currently supported. This matches zero or more characters.
::::

@@ -173,7 +173,7 @@ When using wildcards to query multiple fields, errors might occur if the fields

## Querying nested fields [_querying_nested_fields]

-Querying [nested fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) requires a special syntax. Consider the following document, where `user` is a nested field:
+Querying [nested fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/nested.md) requires a special syntax. Consider the following document, where `user` is a nested field:

```yaml
{
diff --git a/explore-analyze/query-filter/languages/lucene-query-syntax.md b/explore-analyze/query-filter/languages/lucene-query-syntax.md
index 3420a56ad5..4dc8d61cf3 100644
--- a/explore-analyze/query-filter/languages/lucene-query-syntax.md
+++ b/explore-analyze/query-filter/languages/lucene-query-syntax.md
@@ -8,7 +8,7 @@ mapped_pages:

# Lucene query syntax [lucene-query]

-Lucene query syntax is available to {{kib}} users who opt out of the [{{kib}} Query Language](kql.md). Full documentation for this syntax is available as part of {{es}} [query string syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax).
+Lucene query syntax is available to {{kib}} users who opt out of the [{{kib}} Query Language](kql.md). Full documentation for this syntax is available as part of {{es}} [query string syntax](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-query-string-query.md#query-string-syntax).

The main reason to use the Lucene query syntax in {{kib}} is for advanced Lucene features, such as regular expressions or fuzzy term matching. However, Lucene syntax is not able to search nested objects or scripted fields.

diff --git a/explore-analyze/query-filter/languages/querydsl.md b/explore-analyze/query-filter/languages/querydsl.md
index 6e207eaeb6..0b296409f9 100644
--- a/explore-analyze/query-filter/languages/querydsl.md
+++ b/explore-analyze/query-filter/languages/querydsl.md
@@ -39,10 +39,10 @@ The [`_search` endpoint](../../../solutions/search/querying-for-search.md) accep

Query DSL supports a wide range of search techniques, including the following:

* [**Full-text search**](/solutions/search/full-text.md): Search text that has been analyzed and indexed to support phrase or proximity queries, fuzzy matches, and more.
-* [**Keyword search**](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html): Search for exact matches using `keyword` fields.
+* [**Keyword search**](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md): Search for exact matches using `keyword` fields.
* [**Semantic search**](/solutions/search/semantic-search/semantic-search-semantic-text.md): Search `semantic_text` fields using dense or sparse vector search on embeddings generated in your {{es}} cluster.
* [**Vector search**](/solutions/search/vector/knn.md): Search for similar dense vectors using the kNN algorithm for embeddings generated outside of {{es}}.
-* [**Geospatial search**](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-queries.html): Search for locations and calculate spatial relationships using geospatial queries.
+* [**Geospatial search**](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/geo-queries.md): Search for locations and calculate spatial relationships using geospatial queries. You can also filter data using Query DSL. Filters enable you to include or exclude documents by retrieving documents that match specific field-level criteria. A query that uses the `filter` parameter indicates [filter context](#filter-context). @@ -54,9 +54,9 @@ Because aggregations leverage the same data structures used for search, they are The following aggregation types are available: -* [Metric](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html): Calculate metrics, such as a sum or average, from field values. -* [Bucket](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html): Group documents into buckets based on field values, ranges, or other criteria. -* [Pipeline](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline.html): Run aggregations on the results of other aggregations. +* [Metric](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/metrics.md): Calculate metrics, such as a sum or average, from field values. +* [Bucket](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/bucket.md): Group documents into buckets based on field values, ranges, or other criteria. +* [Pipeline](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/pipeline.md): Run aggregations on the results of other aggregations. Run aggregations by specifying the [search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search)'s `aggs` parameter. Learn more in [Run an aggregation](/explore-analyze/query-filter/aggregations.md#run-an-agg). @@ -65,9 +65,9 @@ Run aggregations by specifying the [search API](https://www.elastic.co/docs/api/ Think of the Query DSL as an AST (Abstract Syntax Tree) of queries, consisting of two types of clauses: -**Leaf query clauses**: Leaf query clauses look for a particular value in a particular field, such as the [`match`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html), [`term`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html) or [`range`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html) queries. These queries can be used by themselves. +**Leaf query clauses**: Leaf query clauses look for a particular value in a particular field, such as the [`match`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-match-query.md), [`term`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-term-query.md) or [`range`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-range-query.md) queries. These queries can be used by themselves. -**Compound query clauses**: Compound query clauses wrap other leaf **or** compound queries and are used to combine multiple queries in a logical fashion (such as the [`bool`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html) or [`dis_max`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-dis-max-query.html) query), or to alter their behavior (such as the [`constant_score`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-constant-score-query.html) query). 
+**Compound query clauses**: Compound query clauses wrap other leaf **or** compound queries and are used to combine multiple queries in a logical fashion (such as the [`bool`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-bool-query.md) or [`dis_max`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-dis-max-query.md) query), or to alter their behavior (such as the [`constant_score`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-constant-score-query.md) query). Query clauses behave differently depending on whether they are used in [query context or filter context](#query-filter-context). @@ -77,22 +77,22 @@ $$$query-dsl-allow-expensive-queries$$$ - Queries that need to do linear scans to identify matches: - - [`script` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html) - - queries on [numeric](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html), [date](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html), [boolean](https://www.elastic.co/guide/en/elasticsearch/reference/current/boolean.html), [ip](https://www.elastic.co/guide/en/elasticsearch/reference/current/ip.html), [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) or [keyword](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) fields that are not indexed but have [doc values](https://www.elastic.co/guide/en/elasticsearch/reference/current/doc-values.html) enabled + - [`script` queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-script-query.md) + - queries on [numeric](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md), [date](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md), [boolean](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/boolean.md), [ip](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/ip.md), [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) or [keyword](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) fields that are not indexed but have [doc values](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/doc-values.md) enabled - Queries that have a high up-front cost: - - [`fuzzy` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html) (except on [`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields) - - [`regexp` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html) (except on [`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields) - - [`prefix` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-prefix-query.html) (except on [`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields or those without [`index_prefixes`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-prefixes.html)) - - [`wildcard` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html) (except on 
[`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields) - - [`range` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html) on [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) and [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) fields + - [`fuzzy` queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-fuzzy-query.md) (except on [`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields) + - [`regexp` queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-regexp-query.md) (except on [`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields) + - [`prefix` queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-prefix-query.md) (except on [`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields or those without [`index_prefixes`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/index-prefixes.md)) + - [`wildcard` queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-wildcard-query.md) (except on [`wildcard`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#wildcard-field-type) fields) + - [`range` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html) on [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) and [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) fields - - [Joining queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/joining-queries.html) + - [Joining queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/joining-queries.md) - Queries that may have a high per-document cost: - - [`script_score` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-score-query.html) - - [`percolate` queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-percolate-query.html) + - [`script_score` queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-script-score-query.md) + - [`percolate` queries](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-percolate-query.md) The execution of such queries can be prevented by setting the value of the `search.allow_expensive_queries` setting to `false` (defaults to `true`). 
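As a concrete illustration, a minimal sketch of toggling that setting dynamically through the cluster settings API:

```console
PUT /_cluster/settings
{
  "persistent": {
    "search.allow_expensive_queries": false
  }
}
```

While the setting is `false`, the query types listed above return an error instead of executing; setting it to `null` removes the override and restores the default of `true`.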
@@ -144,7 +144,7 @@ Filter context applies when a query clause is passed to a `filter` parameter, su * `filter` or `must_not` parameters in [`bool`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html) queries * `filter` parameter in [`constant_score`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-constant-score-query.html) queries -* [`filter`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html) aggregations +* [`filter`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-filter-aggregation.md) aggregations Filters optimize query performance and efficiency, especially for structured data queries and when combined with full-text searches. diff --git a/explore-analyze/query-filter/languages/sql-cli.md b/explore-analyze/query-filter/languages/sql-cli.md index 871b0ac51b..7becabafad 100644 --- a/explore-analyze/query-filter/languages/sql-cli.md +++ b/explore-analyze/query-filter/languages/sql-cli.md @@ -83,7 +83,7 @@ fetch separator set to "---------------------" ``` `lenient = ` (default `false`) -: If `false`, Elasticsearch SQL returns an error for fields containing [array values](https://www.elastic.co/guide/en/elasticsearch/reference/current/array.html). If `true`, Elasticsearch SQL returns the first value from the array with no guarantee of consistent results. +: If `false`, Elasticsearch SQL returns an error for fields containing [array values](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/array.md). If `true`, Elasticsearch SQL returns the first value from the array with no guarantee of consistent results. ```sql sql> lenient = true; diff --git a/explore-analyze/query-filter/languages/sql-data-types.md b/explore-analyze/query-filter/languages/sql-data-types.md index 7f7d4ca73d..bd2f9b0a1d 100644 --- a/explore-analyze/query-filter/languages/sql-data-types.md +++ b/explore-analyze/query-filter/languages/sql-data-types.md @@ -12,9 +12,9 @@ mapped_pages: | --- | --- | --- | --- | | **{{es}} type** | **Elasticsearch SQL type** | **SQL type** | **SQL precision** | | Core types | -| [`null`](https://www.elastic.co/guide/en/elasticsearch/reference/current/null-value.html) | `null` | NULL | 0 | -| [`boolean`](https://www.elastic.co/guide/en/elasticsearch/reference/current/boolean.html) | `boolean` | BOOLEAN | 1 | -| [`byte`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) | `byte` | TINYINT | 3 | +| [`null`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/null-value.md) | `null` | NULL | 0 | +| [`boolean`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/boolean.md) | `boolean` | BOOLEAN | 1 | +| [`byte`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) | `byte` | TINYINT | 3 | | [`short`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) | `short` | SMALLINT | 5 | | [`integer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) | `integer` | INTEGER | 10 | | [`long`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) | `long` | BIGINT | 19 | @@ -23,20 +23,20 @@ mapped_pages: | [`float`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) | `float` | REAL | 7 | | 
[`half_float`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) | `half_float` | FLOAT | 3 | | [`scaled_float`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) | `scaled_float` | DOUBLE | 15 | -| [keyword type family](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) | `keyword` | VARCHAR | 32,766 | -| [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) | `text` | VARCHAR | 2,147,483,647 | -| [`binary`](https://www.elastic.co/guide/en/elasticsearch/reference/current/binary.html) | `binary` | VARBINARY | 2,147,483,647 | -| [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) | `datetime` | TIMESTAMP | 29 | -| [`ip`](https://www.elastic.co/guide/en/elasticsearch/reference/current/ip.html) | `ip` | VARCHAR | 39 | -| [`version`](https://www.elastic.co/guide/en/elasticsearch/reference/current/version.html) | `version` | VARCHAR | 32,766 | +| [keyword type family](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) | `keyword` | VARCHAR | 32,766 | +| [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) | `text` | VARCHAR | 2,147,483,647 | +| [`binary`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/binary.md) | `binary` | VARBINARY | 2,147,483,647 | +| [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) | `datetime` | TIMESTAMP | 29 | +| [`ip`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/ip.md) | `ip` | VARCHAR | 39 | +| [`version`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/version.md) | `version` | VARCHAR | 32,766 | | Complex types | -| [`object`](https://www.elastic.co/guide/en/elasticsearch/reference/current/object.html) | `object` | STRUCT | 0 | -| [`nested`](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) | `nested` | STRUCT | 0 | +| [`object`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/object.md) | `object` | STRUCT | 0 | +| [`nested`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/nested.md) | `nested` | STRUCT | 0 | | Unsupported types | | *types not mentioned above* | `unsupported` | OTHER | 0 | ::::{note} -Most of {{es}} [data types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html) are available in Elasticsearch SQL, as indicated above. As one can see, all of {{es}} [data types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html) are mapped to the data type with the same name in Elasticsearch SQL, with the exception of **date** data type which is mapped to **datetime** in Elasticsearch SQL. This is to avoid confusion with the ANSI SQL types **DATE** (date only) and **TIME** (time only), which are also supported by Elasticsearch SQL in queries (with the use of [`CAST`](sql-functions-type-conversion.md#sql-functions-type-conversion-cast)/[`CONVERT`](sql-functions-type-conversion.md#sql-functions-type-conversion-convert)), but don’t correspond to an actual mapping in {{es}} (see the [`table`](#es-sql-only-types) below). +Most of {{es}} [data types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md) are available in Elasticsearch SQL, as indicated above. 
As one can see, all of {{es}} [data types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md) are mapped to the data type with the same name in Elasticsearch SQL, with the exception of the **date** data type, which is mapped to **datetime** in Elasticsearch SQL. This is to avoid confusion with the ANSI SQL types **DATE** (date only) and **TIME** (time only), which are also supported by Elasticsearch SQL in queries (with the use of [`CAST`](sql-functions-type-conversion.md#sql-functions-type-conversion-cast)/[`CONVERT`](sql-functions-type-conversion.md#sql-functions-type-conversion-convert)), but don’t correspond to an actual mapping in {{es}} (see the [`table`](#es-sql-only-types) below).

::::

@@ -72,9 +72,9 @@ The table below indicates these types:

## SQL and multi-fields [sql-multi-field]

-A core concept in {{es}} is that of an `analyzed` field, that is a full-text value that is interpreted in order to be effectively indexed. These fields are of type [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) and are not used for sorting or aggregations as their actual value depends on the [`analyzer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analyzer.html) used hence why {{es}} also offers the [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) type for storing the *exact* value.
+A core concept in {{es}} is that of an `analyzed` field, that is, a full-text value that is interpreted in order to be effectively indexed. These fields are of type [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) and are not used for sorting or aggregations, as their actual value depends on the [`analyzer`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/analyzer.md) used, which is why {{es}} also offers the [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) type for storing the *exact* value.

-In most case, and the default actually, is to use both types for strings which {{es}} supports through [multi-fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html), that is the ability to index the same string in multiple ways; for example index it both as `text` for search but also as `keyword` for sorting and aggregations.
+In most cases the default is to use both types for strings, which {{es}} supports through [multi-fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md), that is, the ability to index the same string in multiple ways; for example, indexing it both as `text` for search and as `keyword` for sorting and aggregations.

As SQL requires exact values, when encountering a `text` field Elasticsearch SQL will search for an exact multi-field that it can use for comparisons, sorting and aggregations. To do that, it will search for the first `keyword` that it can find that is *not* normalized and use that as the original field *exact* value.
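To illustrate the multi-field pattern Elasticsearch SQL relies on here, a minimal mapping sketch (the index and field names are illustrative):

```console
PUT /emp
{
  "mappings": {
    "properties": {
      "first_name": {
        "type": "text",
        "fields": {
          "keyword": {
            "type": "keyword"
          }
        }
      }
    }
  }
}
```

With this mapping, `first_name` is analyzed for full-text search, while the non-normalized `first_name.keyword` sub-field gives Elasticsearch SQL an exact value to fall back on for comparisons, sorting and aggregations.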
diff --git a/explore-analyze/query-filter/languages/sql-functions-aggs.md b/explore-analyze/query-filter/languages/sql-functions-aggs.md index 4337d4a32d..2e66e3bbe2 100644 --- a/explore-analyze/query-filter/languages/sql-functions-aggs.md +++ b/explore-analyze/query-filter/languages/sql-functions-aggs.md @@ -249,7 +249,7 @@ M |emzi ::::{note} -`FIRST` cannot be used with columns of type [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) unless the field is also [saved as a keyword](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html#before-enabling-fielddata). +`FIRST` cannot be used with columns of type [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) unless the field is also [saved as a keyword](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html#before-enabling-fielddata). :::: @@ -407,7 +407,7 @@ SELECT MAX(ABS(salary / -12.0)) AS max FROM emp; ``` ::::{note} -`MAX` on a field of type [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) or [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) is translated into [`LAST/LAST_VALUE`](#sql-functions-aggs-last) and therefore, it cannot be used in `HAVING` clause. +`MAX` on a field of type [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) or [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) is translated into [`LAST/LAST_VALUE`](#sql-functions-aggs-last) and therefore cannot be used in the `HAVING` clause. :::: @@ -560,7 +560,7 @@ PERCENTILE( 1. a numeric field. If this field contains only `null` values, the function returns `null`. Otherwise, the function ignores `null` values in this field. 2. a numeric expression (must be a constant and not based on a field). If `null`, the function returns `null`. -3. optional string literal for the [percentile algorithm](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html#search-aggregations-metrics-percentile-aggregation-approximation). Possible values: `tdigest` or `hdr`. Defaults to `tdigest`. +3. optional string literal for the [percentile algorithm](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-percentile-aggregation.md#search-aggregations-metrics-percentile-aggregation-approximation). Possible values: `tdigest` or `hdr`. Defaults to `tdigest`. 4. optional numeric literal that configures the [percentile algorithm](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html#search-aggregations-metrics-percentile-aggregation-approximation). Configures `compression` for `tdigest` or `number_of_significant_value_digits` for `hdr`. The default is the same as that of the backing algorithm.
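As a sketch of how `PERCENTILE`’s optional third and fourth parameters fit together (this query is not in the patch; it assumes the `emp` test table used by the surrounding examples):

```sql
-- 95th percentile of salary, explicitly requesting the default
-- tdigest algorithm with a compression of 200
SELECT PERCENTILE(salary, 95, 'tdigest', 200) AS "95th" FROM emp;
```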
diff --git a/explore-analyze/query-filter/languages/sql-functions-datetime.md b/explore-analyze/query-filter/languages/sql-functions-datetime.md index 7f8590d224..bbfacaaa95 100644 --- a/explore-analyze/query-filter/languages/sql-functions-datetime.md +++ b/explore-analyze/query-filter/languages/sql-functions-datetime.md @@ -14,7 +14,7 @@ Elasticsearch SQL offers a wide range of facilities for performing date/time man A common requirement when dealing with date/time in general revolves around the notion of `interval`, a topic that is worth exploring in the context of {{es}} and Elasticsearch SQL. -{{es}} has comprehensive support for [date math](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#date-math) both inside [index names](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-date-math-index-names) and [queries](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html). Inside Elasticsearch SQL the former is supported as is by passing the expression in the table name, while the latter is supported through the standard SQL `INTERVAL`. +{{es}} has comprehensive support for [date math](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/common-options.md#date-math) both inside [index names](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-date-math-index-names) and [queries](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-date-format.md). Inside Elasticsearch SQL the former is supported as is by passing the expression in the table name, while the latter is supported through the standard SQL `INTERVAL`. The table below shows the mapping between {{es}} and Elasticsearch SQL: diff --git a/explore-analyze/query-filter/languages/sql-functions-geo.md b/explore-analyze/query-filter/languages/sql-functions-geo.md index 325bc253b4..032c7fe176 100644 --- a/explore-analyze/query-filter/languages/sql-functions-geo.md +++ b/explore-analyze/query-filter/languages/sql-functions-geo.md @@ -17,7 +17,7 @@ The geo functions work with geometries stored in `geo_point`, `geo_shape` and `s ## Limitations [_limitations_4] -[`geo_point`](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html), [`geo_shape`](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html) and [`shape`](https://www.elastic.co/guide/en/elasticsearch/reference/current/shape.html) and types are represented in SQL as geometry and can be used interchangeably with the following exceptions: +[`geo_point`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md), [`geo_shape`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md) and [`shape`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/shape.md) types are represented in SQL as geometry and can be used interchangeably with the following exceptions: * `geo_shape` and `shape` fields don’t have doc values, therefore these fields cannot be used for filtering, grouping or sorting. * `geo_points` fields are indexed and have doc values by default, however only latitude and longitude are stored and indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and 8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed.
Therefore calling `ST_Z` function in the filtering, grouping or sorting will return `null`. diff --git a/explore-analyze/query-filter/languages/sql-functions-grouping.md b/explore-analyze/query-filter/languages/sql-functions-grouping.md index 3da6a85f19..09b01cba0f 100644 --- a/explore-analyze/query-filter/languages/sql-functions-grouping.md +++ b/explore-analyze/query-filter/languages/sql-functions-grouping.md @@ -39,7 +39,7 @@ bucket_key = Math.floor(value / interval) * interval ``` ::::{note} -The histogram in SQL does **NOT** return empty buckets for missing intervals as the traditional [histogram](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html) and [date histogram](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html). Such behavior does not fit conceptually in SQL which treats all missing values as `null`; as such the histogram places all missing values in the `null` group. +The histogram in SQL does **NOT** return empty buckets for missing intervals the way the traditional [histogram](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-histogram-aggregation.md) and [date histogram](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-datehistogram-aggregation.md) aggregations do. Such behavior does not fit conceptually in SQL, which treats all missing values as `null`; as such, the histogram places all missing values in the `null` group. :::: diff --git a/explore-analyze/query-filter/languages/sql-functions-search.md b/explore-analyze/query-filter/languages/sql-functions-search.md index 11fbbb0800..5ada99c76f 100644 --- a/explore-analyze/query-filter/languages/sql-functions-search.md +++ b/explore-analyze/query-filter/languages/sql-functions-search.md @@ -10,7 +10,7 @@ mapped_pages: Search functions should be used when performing full-text search, namely when the `MATCH` or `QUERY` predicates are being used. Outside a, so-called, search context, these functions will return default values such as `0` or `NULL`. -Elasticsearch SQL optimizes all queries executed against {{es}} depending on the scoring needs. Using [`track_scores`](https://www.elastic.co/guide/en/elasticsearch/reference/current/sort-search-results.html#_track_scores) on the search request or [`_doc` sorting](https://www.elastic.co/guide/en/elasticsearch/reference/current/sort-search-results.html) that disables scores calculation, Elasticsearch SQL instructs {{es}} not to compute scores when these are not needed. For example, every time a `SCORE()` function is encountered in the SQL query, the scores are computed. +Elasticsearch SQL optimizes all queries executed against {{es}} depending on the scoring needs. Using [`track_scores`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/sort-search-results.md#_track_scores) on the search request or [`_doc` sorting](https://www.elastic.co/guide/en/elasticsearch/reference/current/sort-search-results.html) that disables scores calculation, Elasticsearch SQL instructs {{es}} not to compute scores when these are not needed. For example, every time a `SCORE()` function is encountered in the SQL query, the scores are computed. ## `MATCH` [sql-functions-search-match] @@ -28,7 +28,7 @@ MATCH( 3.
additional parameters; optional -**Description**: A full-text search option, in the form of a predicate, available in Elasticsearch SQL that gives the user control over powerful [match](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html) and [multi_match](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html) {{es}} queries. +**Description**: A full-text search option, in the form of a predicate, available in Elasticsearch SQL that gives the user control over powerful [match](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-match-query.md) and [multi_match](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-multi-match-query.md) {{es}} queries. The first parameter is the field or fields to match against. In case it receives one value only, Elasticsearch SQL will use a `match` query to perform the search: @@ -98,7 +98,7 @@ QUERY( 2. additional parameters; optional -**Description**: Just like `MATCH`, `QUERY` is a full-text search predicate that gives the user control over the [query_string](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html) query in {{es}}. +**Description**: Just like `MATCH`, `QUERY` is a full-text search predicate that gives the user control over the [query_string](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-query-string-query.md) query in {{es}}. The first parameter is basically the input that will be passed as is to the `query_string` query, which means that anything that `query_string` accepts in its `query` field can be used here as well: @@ -159,7 +159,7 @@ SCORE() **Description**: Returns the [relevance](https://www.elastic.co/guide/en/elasticsearch/guide/2.x/relevance-intro.html) of a given input to the executed query. The higher score, the more relevant the data. ::::{note} -When doing multiple text queries in the `WHERE` clause then, their scores will be combined using the same rules as {{es}}'s [bool query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html). +When doing multiple text queries in the `WHERE` clause, their scores will be combined using the same rules as {{es}}'s [bool query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-bool-query.md). :::: diff --git a/explore-analyze/query-filter/languages/sql-index-patterns.md b/explore-analyze/query-filter/languages/sql-index-patterns.md index 96fec0d4fa..fd2e82d535 100644 --- a/explore-analyze/query-filter/languages/sql-index-patterns.md +++ b/explore-analyze/query-filter/languages/sql-index-patterns.md @@ -13,7 +13,7 @@ Elasticsearch SQL supports two types of patterns for matching multiple indices o ## {{es}} multi-target syntax [sql-index-patterns-multi] -The {{es}} notation for enumerating, including or excluding [multi-target syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index) is supported *as long* as it is quoted or escaped as a table identifier. +The {{es}} notation for enumerating, including or excluding [multi-target syntax](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index) is supported *as long* as it is quoted or escaped as a table identifier.
For example: diff --git a/explore-analyze/query-filter/languages/sql-lexical-structure.md b/explore-analyze/query-filter/languages/sql-lexical-structure.md index 6559b4daf1..71f1cb6d2a 100644 --- a/explore-analyze/query-filter/languages/sql-lexical-structure.md +++ b/explore-analyze/query-filter/languages/sql-lexical-structure.md @@ -43,7 +43,7 @@ Identifiers can be of two types: *quoted* and *unquoted*: SELECT ip_address FROM "hosts-*" ``` -This query has two identifiers, `ip_address` and `hosts-*` (an [index pattern](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index)). As `ip_address` does not clash with any key words it can be used verbatim, `hosts-*` on the other hand cannot as it clashes with `-` (minus operation) and `*` hence the double quotes. +This query has two identifiers, `ip_address` and `hosts-*` (an [index pattern](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index)). As `ip_address` does not clash with any keywords, it can be used verbatim; `hosts-*`, on the other hand, cannot, as it clashes with `-` (minus operation) and `*`, hence the double quotes. Another example: diff --git a/explore-analyze/query-filter/languages/sql-like-rlike-operators.md b/explore-analyze/query-filter/languages/sql-like-rlike-operators.md index 97d8e6c2de..1e0fc51e32 100644 --- a/explore-analyze/query-filter/languages/sql-like-rlike-operators.md +++ b/explore-analyze/query-filter/languages/sql-like-rlike-operators.md @@ -11,7 +11,7 @@ mapped_pages: `LIKE` and `RLIKE` operators are commonly used to filter data based on string patterns. They usually act on a field placed on the left-hand side of the operator, but can also act on a constant (literal) expression. The right-hand side of the operator represents the pattern. Both can be used in the `WHERE` clause of the `SELECT` statement, but `LIKE` can also be used in other places, such as defining an [index pattern](sql-index-patterns.md) or across various [SHOW commands](sql-commands.md). This section covers only the `SELECT ... WHERE ...` usage. ::::{note} -One significant difference between `LIKE`/`RLIKE` and the [full-text search predicates](sql-functions-search.md) is that the former act on [exact fields](sql-data-types.md#sql-multi-field) while the latter also work on [analyzed](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) fields. If the field used with `LIKE`/`RLIKE` doesn’t have an exact not-normalized sub-field (of [keyword](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) type) Elasticsearch SQL will not be able to run the query. If the field is either exact or has an exact sub-field, it will use it as is, or it will automatically use the exact sub-field even if it wasn’t explicitly specified in the statement. +One significant difference between `LIKE`/`RLIKE` and the [full-text search predicates](sql-functions-search.md) is that the former act on [exact fields](sql-data-types.md#sql-multi-field) while the latter also work on [analyzed](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) fields. If the field used with `LIKE`/`RLIKE` doesn’t have an exact not-normalized sub-field (of [keyword](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) type), Elasticsearch SQL will not be able to run the query.
If the field is either exact or has an exact sub-field, it will use it as is, or it will automatically use the exact sub-field even if it wasn’t explicitly specified in the statement. :::: @@ -73,7 +73,7 @@ RLIKE constant_exp <2> **Description**: This operator is similar to `LIKE`, but the user is not limited to search for a string based on a fixed pattern with the percent sign (`%`) and underscore (`_`); the pattern in this case is a regular expression which allows the construction of more flexible patterns. -For supported syntax, see [*Regular expression syntax*](https://www.elastic.co/guide/en/elasticsearch/reference/current/regexp-syntax.html). +For supported syntax, see [*Regular expression syntax*](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/regexp-syntax.md). ```sql SELECT author, name FROM library WHERE name RLIKE 'Child.* Dune'; diff --git a/explore-analyze/query-filter/languages/sql-limitations.md b/explore-analyze/query-filter/languages/sql-limitations.md index d306762bfd..f7b3f8d8a7 100644 --- a/explore-analyze/query-filter/languages/sql-limitations.md +++ b/explore-analyze/query-filter/languages/sql-limitations.md @@ -134,7 +134,7 @@ But, if the sub-select would include a `GROUP BY` or `HAVING` or the enclosing ` ## Using [`FIRST`](sql-functions-aggs.md#sql-functions-aggs-first)/[`LAST`](sql-functions-aggs.md#sql-functions-aggs-last) aggregation functions in `HAVING` clause [first-last-agg-functions-having-clause] -Using `FIRST` and `LAST` in the `HAVING` clause is not supported. The same applies to [`MIN`](sql-functions-aggs.md#sql-functions-aggs-min) and [`MAX`](sql-functions-aggs.md#sql-functions-aggs-max) when their target column is of type [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) or [`unsigned_long`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) as they are internally translated to `FIRST` and `LAST`. +Using `FIRST` and `LAST` in the `HAVING` clause is not supported. The same applies to [`MIN`](sql-functions-aggs.md#sql-functions-aggs-min) and [`MAX`](sql-functions-aggs.md#sql-functions-aggs-max) when their target column is of type [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) or [`unsigned_long`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) as they are internally translated to `FIRST` and `LAST`. ## Using TIME data type in GROUP BY or [`HISTOGRAM`](sql-functions-grouping.md#sql-functions-grouping-histogram) [group-by-time] @@ -167,7 +167,7 @@ By default,`geo_points` fields are indexed and have doc values. However only lat ## Retrieving using the `fields` search parameter [using-fields-api] -Elasticsearch SQL retrieves column values using the [search API’s `fields` parameter](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html#search-fields-param). Any limitations on the `fields` parameter also apply to Elasticsearch SQL queries. For example, if `_source` is disabled for any of the returned fields or at index level, the values cannot be retrieved. +Elasticsearch SQL retrieves column values using the [search API’s `fields` parameter](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md#search-fields-param). Any limitations on the `fields` parameter also apply to Elasticsearch SQL queries. 
For example, if `_source` is disabled for any of the returned fields or at index level, the values cannot be retrieved. ## Aggregations in the [`PIVOT`](sql-syntax-select.md#sql-syntax-pivot) clause [aggs-in-pivot] diff --git a/explore-analyze/query-filter/languages/sql-pagination.md b/explore-analyze/query-filter/languages/sql-pagination.md index 168a6139a8..ed52963f73 100644 --- a/explore-analyze/query-filter/languages/sql-pagination.md +++ b/explore-analyze/query-filter/languages/sql-pagination.md @@ -34,7 +34,7 @@ Which looks like: Note that the `columns` object is only part of the first page. -You’ve reached the last page when there is no `cursor` returned in the results. Like Elasticsearch’s [scroll](https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results), SQL may keep state in Elasticsearch to support the cursor. Unlike scroll, receiving the last page is enough to guarantee that the Elasticsearch state is cleared. +You’ve reached the last page when there is no `cursor` returned in the results. Like Elasticsearch’s [scroll](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/paginate-search-results.md#scroll-search-results), SQL may keep state in Elasticsearch to support the cursor. Unlike scroll, receiving the last page is enough to guarantee that the Elasticsearch state is cleared. To clear the state earlier, use the [clear cursor API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor): diff --git a/explore-analyze/query-filter/languages/sql-rest-filtering.md b/explore-analyze/query-filter/languages/sql-rest-filtering.md index a8db4eaf3b..a94a058338 100644 --- a/explore-analyze/query-filter/languages/sql-rest-filtering.md +++ b/explore-analyze/query-filter/languages/sql-rest-filtering.md @@ -35,7 +35,7 @@ Douglas Adams |The Hitchhiker's Guide to the Galaxy|180 |1979-10-12T ``` ::::{tip} -A useful and less obvious usage for standard Query DSL filtering is to search documents by a specific [routing key](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shard-routing.html#search-routing). Because Elasticsearch SQL does not support a `routing` parameter, one can specify a [`terms` filter for the `_routing` field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-routing-field.html) instead: +A useful and less obvious usage for standard Query DSL filtering is to search documents by a specific [routing key](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/search-shard-routing.md#search-routing). 
Because Elasticsearch SQL does not support a `routing` parameter, one can specify a [`terms` filter for the `_routing` field](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-routing-field.md) instead: ```console POST /_sql?format=txt diff --git a/explore-analyze/query-filter/languages/sql-syntax-select.md b/explore-analyze/query-filter/languages/sql-syntax-select.md index 4b5ebc42b1..edff250ebc 100644 --- a/explore-analyze/query-filter/languages/sql-syntax-select.md +++ b/explore-analyze/query-filter/languages/sql-syntax-select.md @@ -133,7 +133,7 @@ SELECT * FROM "emp" LIMIT 1; 1953-09-02T00:00:00Z|10001 |Georgi |M |1986-06-26T00:00:00.000Z|2 |Facello |Georgi Facello |57305 ``` -The name can be a [pattern](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index) pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that **all** resolved concrete tables have **exact mapping**. +The name can be a [pattern](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index) pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that **all** resolved concrete tables have **exact mapping**. ```sql SELECT emp_no FROM "e*p" LIMIT 1; @@ -507,7 +507,7 @@ Ordering by aggregation is possible for up to **10000** entries for memory consu When doing full-text queries in the `WHERE` clause, results can be returned based on their [score](https://www.elastic.co/guide/en/elasticsearch/guide/2.x/relevance-intro.html) or *relevance* to the given query. ::::{note} -When doing multiple text queries in the `WHERE` clause then, their scores will be combined using the same rules as {{es}}'s [bool query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html). +When doing multiple text queries in the `WHERE` clause, their scores will be combined using the same rules as {{es}}'s [bool query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-bool-query.md). :::: diff --git a/explore-analyze/query-filter/languages/sql-syntax-show-tables.md b/explore-analyze/query-filter/languages/sql-syntax-show-tables.md index 9fbc6c6394..29f5e0fd06 100644 --- a/explore-analyze/query-filter/languages/sql-syntax-show-tables.md +++ b/explore-analyze/query-filter/languages/sql-syntax-show-tables.md @@ -38,7 +38,7 @@ javaRestTest |employees |VIEW |ALIAS javaRestTest |library |TABLE |INDEX ``` -Match multiple indices by using {{es}} [multi-target syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-multi-index) notation: +Match multiple indices by using {{es}} [multi-target syntax](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index) notation: ```sql SHOW TABLES "*,-l*"; diff --git a/explore-analyze/query-filter/languages/sql-translate.md b/explore-analyze/query-filter/languages/sql-translate.md index 77860281c6..f720d8ef82 100644 --- a/explore-analyze/query-filter/languages/sql-translate.md +++ b/explore-analyze/query-filter/languages/sql-translate.md @@ -52,7 +52,7 @@ Which returns: } ``` -Which is the request that SQL will run to provide the results. In this case, SQL will use the [scroll](https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results) API.
If the result contained an aggregation then SQL would use the normal [search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search). +Which is the request that SQL will run to provide the results. In this case, SQL will use the [scroll](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/paginate-search-results.md#scroll-search-results) API. If the result contained an aggregation then SQL would use the normal [search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search). The request body accepts the same [parameters](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query) as the [SQL search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query), excluding `cursor`. diff --git a/explore-analyze/query-filter/tools/console.md b/explore-analyze/query-filter/tools/console.md index 8d71601432..794a728260 100644 --- a/explore-analyze/query-filter/tools/console.md +++ b/explore-analyze/query-filter/tools/console.md @@ -26,7 +26,7 @@ $$$configuring-console$$$ $$$import-export-console-requests$$$ -**Console** is an interactive UI for sending requests to [{{es}} APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-apis.html) and [{{kib}} APIs](https://www.elastic.co/docs/api) and viewing their responses. +**Console** is an interactive UI for sending requests to [{{es}} APIs](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/index.md) and [{{kib}} APIs](https://www.elastic.co/docs/api) and viewing their responses. :::{image} ../../../images/kibana-console.png :alt: Console diff --git a/explore-analyze/query-filter/tools/grok-debugger.md b/explore-analyze/query-filter/tools/grok-debugger.md index d3f5e0fc18..2bbba17b34 100644 --- a/explore-analyze/query-filter/tools/grok-debugger.md +++ b/explore-analyze/query-filter/tools/grok-debugger.md @@ -10,7 +10,7 @@ mapped_pages: You can build and debug grok patterns in the {{kib}} **Grok Debugger** before you use them in your data processing pipelines. Grok is a pattern matching syntax that you can use to parse arbitrary text and structure it. Grok is good for parsing syslog, apache, and other webserver logs, mysql logs, and in general, any log format that is written for human consumption. -Grok patterns are supported in {{es}} [runtime fields](../../../manage-data/data-store/mapping/runtime-fields.md), the {{es}} [grok ingest processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html), and the {{ls}} [grok filter](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html). For syntax, see [Grokking grok](../../scripting/grok.md). +Grok patterns are supported in {{es}} [runtime fields](../../../manage-data/data-store/mapping/runtime-fields.md), the {{es}} [grok ingest processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/grok-processor.md), and the {{ls}} [grok filter](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-grok.md). For syntax, see [Grokking grok](../../scripting/grok.md). The {{stack}} ships with more than 120 reusable grok patterns. For a complete list of patterns, see [{{es}} grok patterns](https://github.com/elastic/elasticsearch/tree/master/libs/grok/src/main/resources/patterns) and [{{ls}} grok patterns](https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns). 
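For readers new to grok, a minimal illustration of the kind of input and pattern you might paste into the **Grok Debugger** (the sample log line and ECS field names here are illustrative, not taken from the docs being patched):

```
# Sample data:
55.3.244.1 GET /index.html 15824 0.043

# Grok pattern that structures it:
%{IP:client.ip} %{WORD:http.request.method} %{URIPATHPARAM:url.path} %{NUMBER:http.response.body.bytes} %{NUMBER:event.duration}
```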
diff --git a/explore-analyze/report-and-share.md b/explore-analyze/report-and-share.md index 304e08fdcb..5c781c4bf2 100644 --- a/explore-analyze/report-and-share.md +++ b/explore-analyze/report-and-share.md @@ -132,7 +132,7 @@ To work around the limitations, use filters to create multiple smaller reports, For more information on using Elasticsearch APIs directly, see [Scroll API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll), [Point in time API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time), [ES|QL](/explore-analyze/query-filter/languages/esql-rest.md) or [SQL](/explore-analyze/query-filter/languages/sql-rest-format.md#_csv) with CSV response data format. We recommend that you use an official Elastic language client: details for each programming language library that Elastic provides are in the [{{es}} Client documentation](https://www.elastic.co/guide/en/elasticsearch/client/index.html). -[Reporting parameters](https://www.elastic.co/guide/en/kibana/current/reporting-settings-kb.html) can be adjusted to overcome some of these limiting scenarios. Results are dependent on data size, availability, and latency factors and are not guaranteed. +[Reporting parameters](asciidocalypse://docs/kibana/docs/reference/configuration-reference/reporting-settings.md) can be adjusted to overcome some of these limiting scenarios. Results are dependent on data size, availability, and latency factors and are not guaranteed. ### PNG/PDF report limitations [pdf-limitations] diff --git a/explore-analyze/report-and-share/reporting-troubleshooting-csv.md b/explore-analyze/report-and-share/reporting-troubleshooting-csv.md index ba8355836e..9fad6badba 100644 --- a/explore-analyze/report-and-share/reporting-troubleshooting-csv.md +++ b/explore-analyze/report-and-share/reporting-troubleshooting-csv.md @@ -26,7 +26,7 @@ To work around the limitations, use filters to create multiple smaller reports, For more information on using Elasticsearch APIs directly, see [Scroll API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll), [Point in time API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time), [ES|QL](../query-filter/languages/esql-rest.md) or [SQL](../query-filter/languages/sql-rest-format.md#_csv) with CSV response data format. We recommend that you use an official Elastic language client: details for each programming language library that Elastic provides are in the [{{es}} Client documentation](https://www.elastic.co/guide/en/elasticsearch/client/index.html). -[Reporting parameters](https://www.elastic.co/guide/en/kibana/current/reporting-settings-kb.html) can be adjusted to overcome some of these limiting scenarios. Results are dependent on data size, availability, and latency factors and are not guaranteed. +[Reporting parameters](asciidocalypse://docs/kibana/docs/reference/configuration-reference/reporting-settings.md) can be adjusted to overcome some of these limiting scenarios. Results are dependent on data size, availability, and latency factors and are not guaranteed. :::: @@ -43,7 +43,7 @@ The Kibana CSV export feature collects all of the data from Elasticsearch by usi 1. Permissions to read data aliases alone will not work: the permissions are needed on the underlying indices or data streams. 2. In cases where data shards are unavailable or time out, the export will be empty rather than returning partial data. 
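For context, the point in time mechanism that the CSV export relies on can be sketched as follows; the export opens a PIT internally, so you would not normally issue this call yourself (the index name is illustrative):

```console
POST /my-index-000001/_pit?keep_alive=1m
```

The `id` returned by this call is then passed in the `pit` section of subsequent `_search` requests so that every page reads from the same consistent view of the index.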
-Some users may benefit from using the [scroll API](https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results), an alternative to paging through the data. The behavior of this API does not have the limitations of point in time API, however it has its own limitations: +Some users may benefit from using the [scroll API](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/paginate-search-results.md#scroll-search-results), an alternative to paging through the data. This API does not have the limitations of the point in time API; however, it has its own limitations: 1. Search is limited to 500 shards at the very most. 2. In cases where the data shards are unavailable or time out, the export may return partial data. diff --git a/explore-analyze/report-and-share/reporting-troubleshooting.md b/explore-analyze/report-and-share/reporting-troubleshooting.md index 422d992adb..afa5a13795 100644 --- a/explore-analyze/report-and-share/reporting-troubleshooting.md +++ b/explore-analyze/report-and-share/reporting-troubleshooting.md @@ -49,7 +49,7 @@ These messages alone don’t indicate a problem. They show normal events that ha There are two primary causes for a "Max attempts reached" error: * You’re creating a PDF of a visualization or dashboard that spans a large amount of data and Kibana is hitting the `xpack.reporting.queue.timeout` -* Kibana is hosted behind a reverse-proxy, and the [Kibana server settings](https://www.elastic.co/guide/en/kibana/current/reporting-settings-kb.html#reporting-kibana-server-settings) are not configured correctly +* Kibana is hosted behind a reverse-proxy, and the [Kibana server settings](asciidocalypse://docs/kibana/docs/reference/configuration-reference/reporting-settings.md#reporting-kibana-server-settings) are not configured correctly Create a Markdown visualization and then create a PDF report. If this succeeds, increase the `xpack.reporting.queue.timeout` setting. If the PDF report fails with "Max attempts reached," check your [Kibana server settings](https://www.elastic.co/guide/en/kibana/current/reporting-settings-kb.html#reporting-kibana-server-settings). diff --git a/explore-analyze/scripting/dissect.md b/explore-analyze/scripting/dissect.md index a047772161..388a622b35 100644 --- a/explore-analyze/scripting/dissect.md +++ b/explore-analyze/scripting/dissect.md @@ -55,7 +55,7 @@ Now that you have a dissect pattern, how do you test and use it? ## Test dissect patterns with Painless [dissect-patterns-test] -You can incorporate dissect patterns into Painless scripts to extract data. To test your script, use either the [field contexts](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html#painless-execute-runtime-field-context) of the Painless execute API or create a runtime field that includes the script. Runtime fields offer greater flexibility and accept multiple documents, but the Painless execute API is a great option if you don’t have write access on a cluster where you’re testing a script. +You can incorporate dissect patterns into Painless scripts to extract data. To test your script, use either the [field contexts](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-api-examples.md#painless-execute-runtime-field-context) of the Painless execute API or create a runtime field that includes the script.
Runtime fields offer greater flexibility and accept multiple documents, but the Painless execute API is a great option if you don’t have write access on a cluster where you’re testing a script. For example, test your dissect pattern with the Painless execute API by including your Painless script and a single document that matches your data. Start by indexing the `message` field as a `wildcard` data type: diff --git a/explore-analyze/scripting/grok.md b/explore-analyze/scripting/grok.md index 16e7ce5ee7..4110a8687e 100644 --- a/explore-analyze/scripting/grok.md +++ b/explore-analyze/scripting/grok.md @@ -46,14 +46,14 @@ The first value is a number, followed by what appears to be an IP address. You c To ease migration to the [Elastic Common Schema (ECS)](https://www.elastic.co/guide/en/ecs/current), a new set of ECS-compliant patterns is available in addition to the existing patterns. The new ECS pattern definitions capture event field names that are compliant with the schema. -The ECS pattern set has all of the pattern definitions from the legacy set, and is a drop-in replacement. Use the [`ecs-compatability`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html#plugins-filters-grok-ecs_compatibility) setting to switch modes. +The ECS pattern set has all of the pattern definitions from the legacy set, and is a drop-in replacement. Use the [`ecs_compatibility`](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-grok.md#plugins-filters-grok-ecs_compatibility) setting to switch modes. New features and enhancements will be added to the ECS-compliant files. The legacy patterns may still receive bug fixes which are backwards compatible. ## Use grok patterns in Painless scripts [grok-patterns] -You can incorporate predefined grok patterns into Painless scripts to extract data. To test your script, use either the [field contexts](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html#painless-execute-runtime-field-context) of the Painless execute API or create a runtime field that includes the script. Runtime fields offer greater flexibility and accept multiple documents, but the Painless execute API is a great option if you don’t have write access on a cluster where you’re testing a script. +You can incorporate predefined grok patterns into Painless scripts to extract data. To test your script, use either the [field contexts](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-api-examples.md#painless-execute-runtime-field-context) of the Painless execute API or create a runtime field that includes the script. Runtime fields offer greater flexibility and accept multiple documents, but the Painless execute API is a great option if you don’t have write access on a cluster where you’re testing a script. ::::{tip} If you need help building grok patterns to match your data, use the [Grok Debugger](../query-filter/tools/grok-debugger.md) tool in {{kib}}. @@ -154,7 +154,7 @@ GET my-index/_search ## Return calculated results [grok-pattern-results] -Using the `http.clientip` runtime field, you can define a simple query to run a search for a specific IP address and return all related fields. The [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html) parameter on the `_search` API works for all fields, even those that weren’t sent as part of the original `_source`:
The [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html) parameter on the `_search` API works for all fields, even those that weren’t sent as part of the original `_source`: +Using the `http.clientip` runtime field, you can define a simple query to run a search for a specific IP address and return all related fields. The [`fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md) parameter on the `_search` API works for all fields, even those that weren’t sent as part of the original `_source`: ```console GET my-index/_search diff --git a/explore-analyze/scripting/modules-scripting-engine.md b/explore-analyze/scripting/modules-scripting-engine.md index 47fe7ae435..060760baf8 100644 --- a/explore-analyze/scripting/modules-scripting-engine.md +++ b/explore-analyze/scripting/modules-scripting-engine.md @@ -10,7 +10,7 @@ mapped_pages: A `ScriptEngine` is a backend for implementing a scripting language. It may also be used to write scripts that need to use advanced internals of scripting. For example, a script that wants to use term frequencies while scoring. -The plugin [documentation](https://www.elastic.co/guide/en/elasticsearch/plugins/current/plugin-authors.html) has more information on how to write a plugin so that Elasticsearch will properly load it. To register the `ScriptEngine`, your plugin should implement the `ScriptPlugin` interface and override the `getScriptEngine(Settings settings)` method. +The plugin [documentation](asciidocalypse://docs/elasticsearch/docs/extend/create-elasticsearch-plugins/index.md) has more information on how to write a plugin so that Elasticsearch will properly load it. To register the `ScriptEngine`, your plugin should implement the `ScriptPlugin` interface and override the `getScriptEngine(Settings settings)` method. The following is an example of a custom `ScriptEngine` which uses the language name `expert_scripts`. It implements a single script called `pure_df` which may be used as a search script to override each document’s score as the document frequency of a provided term. diff --git a/explore-analyze/scripting/modules-scripting-fields.md b/explore-analyze/scripting/modules-scripting-fields.md index 0ac0ed4119..8789a10dda 100644 --- a/explore-analyze/scripting/modules-scripting-fields.md +++ b/explore-analyze/scripting/modules-scripting-fields.md @@ -16,27 +16,27 @@ Depending on where a script is used, it will have access to certain special vari A script used in the [update](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update), [update-by-query](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query), or [reindex](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) API will have access to the `ctx` variable which exposes: `ctx._source` -: Access to the document [`_source` field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html). +: Access to the document [`_source` field](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md). `ctx.op` : The operation that should be applied to the document: `index` or `delete`. `ctx._index` etc -: Access to [document metadata fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-fields.html), some of which may be read-only. 
+: Access to [document metadata fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/document-metadata-fields.md), some of which may be read-only. These scripts do not have access to the `doc` variable and have to use `ctx` to access the documents they operate on. ## Search and aggregation scripts [_search_and_aggregation_scripts] -With the exception of [script fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html#script-fields) which are executed once per search hit, scripts used in search and aggregations will be executed once for every document which might match a query or an aggregation. Depending on how many documents you have, this could mean millions or billions of executions: these scripts need to be fast! +With the exception of [script fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md#script-fields) which are executed once per search hit, scripts used in search and aggregations will be executed once for every document which might match a query or an aggregation. Depending on how many documents you have, this could mean millions or billions of executions: these scripts need to be fast! Field values can be accessed from a script using [doc-values](#modules-scripting-doc-vals), [the `_source` field](#modules-scripting-source), or [stored fields](#modules-scripting-stored), each of which is explained below. ### Accessing the score of a document within a script [scripting-score] -Scripts used in the [`function_score` query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html), in [script-based sorting](https://www.elastic.co/guide/en/elasticsearch/reference/current/sort-search-results.html), or in [aggregations](../query-filter/aggregations.md) have access to the `_score` variable which represents the current relevance score of a document. +Scripts used in the [`function_score` query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-function-score-query.md), in [script-based sorting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/sort-search-results.md), or in [aggregations](../query-filter/aggregations.md) have access to the `_score` variable which represents the current relevance score of a document. Here’s an example of using a script in a [`function_score` query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html) to alter the relevance `_score` of each document: @@ -76,7 +76,7 @@ GET my-index-000001/_search ### Accessing term statistics of a document within a script [scripting-term-statistics] -Scripts used in a [`script_score`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-score-query.html) query have access to the `_termStats` variable which provides statistical information about the terms in the child query. +Scripts used in a [`script_score`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-script-score-query.md) query have access to the `_termStats` variable which provides statistical information about the terms in the child query. 
In the following example, `_termStats` is used within a [`script_score`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-score-query.html) query to retrieve the average term frequency for the terms `quick`, `brown`, and `fox` in the `text` field: @@ -143,7 +143,7 @@ The `_termStats` variable is only available when using the [Painless](modules-sc ### Doc values [modules-scripting-doc-vals] -By far the fastest most efficient way to access a field value from a script is to use the `doc['field_name']` syntax, which retrieves the field value from [doc values](https://www.elastic.co/guide/en/elasticsearch/reference/current/doc-values.html). Doc values are a columnar field value store, enabled by default on all fields except for [analyzed `text` fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html). +By far the fastest and most efficient way to access a field value from a script is to use the `doc['field_name']` syntax, which retrieves the field value from [doc values](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/doc-values.md). Doc values are a columnar field value store, enabled by default on all fields except for [analyzed `text` fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md). ```console PUT my-index-000001/_doc/1?refresh @@ -239,7 +239,7 @@ GET my-index-000001/_search ### Stored fields [modules-scripting-stored] -*Stored fields* — fields explicitly marked as [`"store": true`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-store.html) in the mapping — can be accessed using the `_fields['field_name'].value` or `_fields['field_name']` syntax: +*Stored fields* — fields explicitly marked as [`"store": true`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-store.md) in the mapping — can be accessed using the `_fields['field_name'].value` or `_fields['field_name']` syntax: ```console PUT my-index-000001 diff --git a/explore-analyze/scripting/modules-scripting-painless.md b/explore-analyze/scripting/modules-scripting-painless.md index e1e457cf94..a0da4eda76 100644 --- a/explore-analyze/scripting/modules-scripting-painless.md +++ b/explore-analyze/scripting/modules-scripting-painless.md @@ -22,4 +22,4 @@ Painless provides numerous capabilities that center around the following core pr Ready to start scripting with Painless? Learn how to [write your first script](modules-scripting-using.md). -If you’re already familiar with Painless, see the [Painless Language Specification](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-lang-spec.html) for a detailed description of the Painless syntax and language features. +If you’re already familiar with Painless, see the [Painless Language Specification](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-language-specification.md) for a detailed description of the Painless syntax and language features.
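Tying the earlier `ctx` discussion to a concrete call, here is a minimal sketch of an update script that reads and writes `ctx._source` (the index, document ID, field, and parameter are illustrative):

```console
POST my-index-000001/_update/1
{
  "script": {
    "lang": "painless",
    "source": "ctx._source.counter += params.count",
    "params": {
      "count": 4
    }
  }
}
```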
diff --git a/explore-analyze/scripting/modules-scripting-security.md b/explore-analyze/scripting/modules-scripting-security.md index b06d04ee34..2a45f6702c 100644 --- a/explore-analyze/scripting/modules-scripting-security.md +++ b/explore-analyze/scripting/modules-scripting-security.md @@ -16,9 +16,9 @@ The second layer of security is the [Java Security Manager](https://www.oracle.c {{es}} uses [seccomp](https://en.wikipedia.org/wiki/Seccomp) in Linux, [Seatbelt](https://www.chromium.org/developers/design-documents/sandbox/osx-sandboxing-design) in macOS, and [ActiveProcessLimit](https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147) on Windows as additional security layers to prevent {{es}} from forking or running other processes. -Finally, scripts used in [scripted metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html) can be restricted to a defined list of scripts, or forbidden altogether. This can prevent users from running particularly slow or resource intensive aggregation queries. +Finally, scripts used in [scripted metrics aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) can be restricted to a defined list of scripts, or forbidden altogether. This can prevent users from running particularly slow or resource intensive aggregation queries. -You can modify the following script settings to restrict the type of scripts that are allowed to run, and control the available [contexts](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html) that scripts can run in. To implement additional layers in your defense in depth strategy, follow the [{{es}} security principles](../../deploy-manage/security.md). +You can modify the following script settings to restrict the type of scripts that are allowed to run, and control the available [contexts](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-contexts.md) that scripts can run in. To implement additional layers in your defense in depth strategy, follow the [{{es}} security principles](../../deploy-manage/security.md). ## Allowed script types setting [allowed-script-types-setting] @@ -50,7 +50,7 @@ script.allowed_contexts: score, update ## Allowed scripts in scripted metrics aggregations [allowed-script-in-aggs-settings] -By default, all scripts are permitted in [scripted metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html). To restrict the set of allowed scripts, set [`search.aggs.only_allowed_metric_scripts`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-settings.html#search-settings-only-allowed-scripts) to `true` and provide the allowed scripts using [`search.aggs.allowed_inline_metric_scripts`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-settings.html#search-settings-allowed-inline-scripts) and/or [`search.aggs.allowed_stored_metric_scripts`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-settings.html#search-settings-allowed-stored-scripts). +By default, all scripts are permitted in [scripted metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html). 
To restrict the set of allowed scripts, set [`search.aggs.only_allowed_metric_scripts`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/search-settings.md#search-settings-only-allowed-scripts) to `true` and provide the allowed scripts using [`search.aggs.allowed_inline_metric_scripts`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-settings.html#search-settings-allowed-inline-scripts) and/or [`search.aggs.allowed_stored_metric_scripts`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-settings.html#search-settings-allowed-stored-scripts). To disallow certain script types, omit the corresponding script list (`search.aggs.allowed_inline_metric_scripts` or `search.aggs.allowed_stored_metric_scripts`) or set it to an empty array. When both script lists are not empty, the given stored scripts and the given inline scripts will be allowed. diff --git a/explore-analyze/scripting/modules-scripting-using.md b/explore-analyze/scripting/modules-scripting-using.md index b56848ea14..23832aa54a 100644 --- a/explore-analyze/scripting/modules-scripting-using.md +++ b/explore-analyze/scripting/modules-scripting-using.md @@ -34,7 +34,7 @@ Wherever scripting is supported in the {{es}} APIs, the syntax follows the same A Painless script is structured as one or more statements and optionally has one or more user-defined functions at the beginning. A script must always have at least one statement. -The [Painless execute API](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html) provides the ability to test a script with simple user-defined parameters and receive a result. Let’s start with a complete script and review its constituent parts. +The [Painless execute API](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-api-examples.md) provides the ability to test a script with simple user-defined parameters and receive a result. Let’s start with a complete script and review its constituent parts. First, index a document with a single field so that we have some data to work with: @@ -45,7 +45,7 @@ PUT my-index-000001/_doc/1 } ``` -We can then construct a script that operates on that field and run evaluate the script as part of a query. The following query uses the [`script_fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html#script-fields) parameter of the search API to retrieve a script valuation. There’s a lot happening here, but we’ll break it down the components to understand them individually. For now, you only need to understand that this script takes `my_field` and operates on it. +We can then construct a script that operates on that field and evaluate the script as part of a query. The following query uses the [`script_fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md#script-fields) parameter of the search API to retrieve a script valuation. There’s a lot happening here, but we’ll break down the components to understand them individually. For now, you only need to understand that this script takes `my_field` and operates on it.
```console GET my-index-000001/_search diff --git a/explore-analyze/scripting/scripting-field-extraction.md b/explore-analyze/scripting/scripting-field-extraction.md index 9a97dcab68..6bf983c52b 100644 --- a/explore-analyze/scripting/scripting-field-extraction.md +++ b/explore-analyze/scripting/scripting-field-extraction.md @@ -246,7 +246,7 @@ The following pattern tells dissect to return the term `used`, a blank space, th emit("used" + ' ' + gc.usize + ', ' + "capacity" + ' ' + gc.csize + ', ' + "committed" + ' ' + gc.comsize) ``` -Putting it all together, you can create a runtime field named `gc_size` in a search request. Using the [`fields` option](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html#search-fields-param), you can retrieve all values for the `gc_size` runtime field. This query also includes a bucket aggregation to group your data. +Putting it all together, you can create a runtime field named `gc_size` in a search request. Using the [`fields` option](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md#search-fields-param), you can retrieve all values for the `gc_size` runtime field. This query also includes a bucket aggregation to group your data. ```console GET my-index/_search diff --git a/explore-analyze/scripting/scripts-search-speed.md b/explore-analyze/scripting/scripts-search-speed.md index c14c6c5526..a07899afb3 100644 --- a/explore-analyze/scripting/scripts-search-speed.md +++ b/explore-analyze/scripting/scripts-search-speed.md @@ -72,7 +72,7 @@ PUT /my_test_scores/_mapping } ``` -Next, use an [ingest pipeline](../../manage-data/ingest/transform-enrich/ingest-pipelines.md) containing the [script processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/script-processor.html) to calculate the sum of `math_score` and `verbal_score` and index it in the `total_score` field. +Next, use an [ingest pipeline](../../manage-data/ingest/transform-enrich/ingest-pipelines.md) containing the [script processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/script-processor.md) to calculate the sum of `math_score` and `verbal_score` and index it in the `total_score` field. ```console PUT _ingest/pipeline/my_test_scores_pipeline diff --git a/explore-analyze/transforms/ecommerce-transforms.md b/explore-analyze/transforms/ecommerce-transforms.md index 6d65038eff..bd9ce70399 100644 --- a/explore-analyze/transforms/ecommerce-transforms.md +++ b/explore-analyze/transforms/ecommerce-transforms.md @@ -8,7 +8,7 @@ mapped_pages: # Tutorial: Transforming the eCommerce sample data [ecommerce-transforms] -[{{transforms-cap}}](../transforms.md) enable you to retrieve information from an {{es}} index, transform it, and store it in another index. Let’s use the [{{kib}} sample data](https://www.elastic.co/guide/en/kibana/current/get-started.html) to demonstrate how you can pivot and summarize your data with {{transforms}}. +[{{transforms-cap}}](../transforms.md) enable you to retrieve information from an {{es}} index, transform it, and store it in another index. Let’s use the [{{kib}} sample data](/explore-analyze/index.md) to demonstrate how you can pivot and summarize your data with {{transforms}}. 1. Verify that your environment is set up properly to use {{transforms}}. If the {{es}} {{security-features}} are enabled, to complete this tutorial you need a user that has authority to preview and create {{transforms}}. 
You must also have specific index privileges for the source and destination indices. See [Setup](transform-setup.md). 2. Choose your *source index*. @@ -27,7 +27,7 @@ mapped_pages: :class: screenshot ::: - Group the data by customer ID and add one or more aggregations to learn more about each customer’s orders. For example, let’s calculate the sum of products they purchased, the total price of their purchases, the maximum number of products that they purchased in a single order, and their total number of orders. We’ll accomplish this by using the [`sum` aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html) on the `total_quantity` and `taxless_total_price` fields, the [`max` aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html) on the `total_quantity` field, and the [`cardinality` aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html) on the `order_id` field: + Group the data by customer ID and add one or more aggregations to learn more about each customer’s orders. For example, let’s calculate the sum of products they purchased, the total price of their purchases, the maximum number of products that they purchased in a single order, and their total number of orders. We’ll accomplish this by using the [`sum` aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-sum-aggregation.md) on the `total_quantity` and `taxless_total_price` fields, the [`max` aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-max-aggregation.md) on the `total_quantity` field, and the [`cardinality` aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-cardinality-aggregation.md) on the `order_id` field: :::{image} ../../images/elasticsearch-reference-ecommerce-pivot2.png :alt: Adding multiple aggregations to a {{transform}} in {{kib}} @@ -171,7 +171,7 @@ mapped_pages: :::: 5. Optional: Create the destination index. - If the destination index does not exist, it is created the first time you start your {{transform}}. A pivot transform deduces the mappings for the destination index from the source indices and the transform aggregations. If there are fields in the destination index that are derived from scripts (for example, if you use [`scripted_metrics`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html) or [`bucket_scripts`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html) aggregations), they’re created with [dynamic mappings](../../manage-data/data-store/mapping/dynamic-mapping.md). You can use the preview {{transform}} API to preview the mappings it will use for the destination index. In {{kib}}, if you copied the API request to your clipboard, paste it into the console, then refer to the `generated_dest_index` object in the API response. + If the destination index does not exist, it is created the first time you start your {{transform}}. A pivot transform deduces the mappings for the destination index from the source indices and the transform aggregations. 
If there are fields in the destination index that are derived from scripts (for example, if you use [`scripted_metrics`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) or [`bucket_scripts`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-bucket-script-aggregation.md) aggregations), they’re created with [dynamic mappings](../../manage-data/data-store/mapping/dynamic-mapping.md). You can use the preview {{transform}} API to preview the mappings it will use for the destination index. In {{kib}}, if you copied the API request to your clipboard, paste it into the console, then refer to the `generated_dest_index` object in the API response. ::::{note} {{transforms-cap}} might have more configuration options provided by the APIs than the options available in {{kib}}. For example, you can set an ingest pipeline for `dest` by calling the [Create {{transform}}](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform). For all the {{transform}} configuration options, refer to the [documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-transform). :::: diff --git a/explore-analyze/transforms/transform-checkpoints.md b/explore-analyze/transforms/transform-checkpoints.md index caaea24411..0f6c58a729 100644 --- a/explore-analyze/transforms/transform-checkpoints.md +++ b/explore-analyze/transforms/transform-checkpoints.md @@ -39,9 +39,9 @@ If the cluster experiences unsuitable performance degradation due to the {{trans ## Using the ingest timestamp for syncing the {{transform}} [sync-field-ingest-timestamp] -In most cases, it is strongly recommended to use the ingest timestamp of the source indices for syncing the {{transform}}. This is the most optimal way for {{transforms}} to be able to identify new changes. If your data source follows the [ECS standard](https://www.elastic.co/guide/en/ecs/current/ecs-reference.html), you might already have an [`event.ingested`](https://www.elastic.co/guide/en/ecs/current/ecs-event.html#field-event-ingested) field. In this case, use `event.ingested` as the `sync`.`time`.`field` property of your {{transform}}. +In most cases, it is strongly recommended to use the ingest timestamp of the source indices for syncing the {{transform}}. This is the most efficient way for {{transforms}} to identify new changes. If your data source follows the [ECS standard](asciidocalypse://docs/ecs/docs/reference/ecs/index.md), you might already have an [`event.ingested`](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-event.md#field-event-ingested) field. In this case, use `event.ingested` as the `sync`.`time`.`field` property of your {{transform}}. -If you don’t have a `event.ingested` field or it isn’t populated, you can set it by using an ingest pipeline.
Create an ingest pipeline either using the [ingest pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) (like the example below) or via {{kib}} under **Stack Management > Ingest Pipelines**. Use a [`set` processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/set-processor.md) to set the field and associate it with the value of the ingest timestamp. ```console PUT _ingest/pipeline/set_ingest_time diff --git a/explore-analyze/transforms/transform-examples.md b/explore-analyze/transforms/transform-examples.md index 45e9b67d26..8dbef30bdf 100644 --- a/explore-analyze/transforms/transform-examples.md +++ b/explore-analyze/transforms/transform-examples.md @@ -9,7 +9,7 @@ mapped_pages: # Examples [transform-examples] -These examples demonstrate how to use {{transforms}} to derive useful insights from your data. All the examples use one of the [{{kib}} sample datasets](https://www.elastic.co/guide/en/kibana/current/get-started.html). For a more detailed, step-by-step example, see [Tutorial: Transforming the eCommerce sample data](ecommerce-transforms.md). +These examples demonstrate how to use {{transforms}} to derive useful insights from your data. All the examples use one of the [{{kib}} sample datasets](/explore-analyze/index.md). For a more detailed, step-by-step example, see [Tutorial: Transforming the eCommerce sample data](ecommerce-transforms.md). * [Finding your best customers](#example-best-customers) * [Finding air carriers with the most delays](#example-airline) @@ -94,7 +94,7 @@ It’s possible to answer these questions using aggregations alone, however {{tr ## Finding air carriers with the most delays [example-airline] -This example uses the Flights sample data set to find out which air carrier had the most delays. First, filter the source data such that it excludes all the cancelled flights by using a query filter. Then transform the data to contain the distinct number of flights, the sum of delayed minutes, and the sum of the flight minutes by air carrier. Finally, use a [`bucket_script`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html) to determine what percentage of the flight time was actually delay. +This example uses the Flights sample data set to find out which air carrier had the most delays. First, filter the source data such that it excludes all the cancelled flights by using a query filter. Then transform the data to contain the distinct number of flights, the sum of delayed minutes, and the sum of the flight minutes by air carrier. Finally, use a [`bucket_script`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-bucket-script-aggregation.md) to determine what percentage of the flight time actually consisted of delays. ```console POST _transform/_preview @@ -415,9 +415,9 @@ This {{transform}} makes it easier to answer questions such as: ## Finding client IPs that sent the most bytes to the server [example-bytes] -This example uses the web log sample data set to find the client IP that sent the most bytes to the server in every hour. The example uses a `pivot` {{transform}} with a [`top_metrics`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-metrics.html) aggregation. +This example uses the web log sample data set to find the client IP that sent the most bytes to the server in every hour.
The example uses a `pivot` {{transform}} with a [`top_metrics`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-top-metrics.md) aggregation. -Group the data by a [date histogram](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html#_date_histogram) on the time field with an interval of one hour. Use a [max aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html) on the `bytes` field to get the maximum amount of data that is sent to the server. Without the `max` aggregation, the API call still returns the client IP that sent the most bytes, however, the amount of bytes that it sent is not returned. In the `top_metrics` property, specify `clientip` and `geo.src`, then sort them by the `bytes` field in descending order. The {{transform}} returns the client IP that sent the biggest amount of data and the 2-letter ISO code of the corresponding location. +Group the data by a [date histogram](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-composite-aggregation.md#_date_histogram) on the time field with an interval of one hour. Use a [max aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-max-aggregation.md) on the `bytes` field to get the maximum amount of data that is sent to the server. Without the `max` aggregation, the API call still returns the client IP that sent the most bytes; however, the amount of bytes that it sent is not returned. In the `top_metrics` property, specify `clientip` and `geo.src`, then sort them by the `bytes` field in descending order. The {{transform}} returns the client IP that sent the biggest amount of data and the 2-letter ISO code of the corresponding location. ```console POST _transform/_preview diff --git a/explore-analyze/transforms/transform-limitations.md b/explore-analyze/transforms/transform-limitations.md index 9cea4ef14b..ef1f6d5355 100644 --- a/explore-analyze/transforms/transform-limitations.md +++ b/explore-analyze/transforms/transform-limitations.md @@ -49,7 +49,7 @@ A {{ctransform}} periodically checks for changes to source data. The functionali ### Aggregation responses may be incompatible with destination index mappings [transform-aggresponse-limitations] -When a pivot {{transform}} is first started, it will deduce the mappings required for the destination index. This process is based on the field types of the source index and the aggregations used. If the fields are derived from [`scripted_metrics`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html) or [`bucket_scripts`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html), [dynamic mappings](../../manage-data/data-store/mapping/dynamic-mapping.md) will be used. In some instances the deduced mappings may be incompatible with the actual data. For example, numeric overflows might occur or dynamically mapped fields might contain both numbers and strings. Please check {{es}} logs if you think this may have occurred. +When a pivot {{transform}} is first started, it will deduce the mappings required for the destination index. This process is based on the field types of the source index and the aggregations used.
If the fields are derived from [`scripted_metrics`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) or [`bucket_scripts`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-bucket-script-aggregation.md), [dynamic mappings](../../manage-data/data-store/mapping/dynamic-mapping.md) will be used. In some instances the deduced mappings may be incompatible with the actual data. For example, numeric overflows might occur or dynamically mapped fields might contain both numbers and strings. Please check {{es}} logs if you think this may have occurred. You can view the deduced mappings by using the [preview transform API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform). See the `generated_dest_index` object in the API response. @@ -57,7 +57,7 @@ If it’s required, you may define custom mappings prior to starting the {{trans ### Batch {{transforms}} may not account for changed documents [transform-batch-limitations] -A batch {{transform}} uses a [composite aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html) which allows efficient pagination through all buckets. Composite aggregations do not yet support a search context, therefore if the source data is changed (deleted, updated, added) while the batch {{dataframe}} is in progress, then the results may not include these changes. +A batch {{transform}} uses a [composite aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-composite-aggregation.md), which allows efficient pagination through all buckets. Composite aggregations do not yet support a search context; therefore, if the source data is changed (deleted, updated, added) while the batch {{dataframe}} is in progress, the results may not include these changes. ### {{ctransform-cap}} consistency does not account for deleted or updated documents [transform-consistency-limitations] @@ -77,7 +77,7 @@ When deleting a {{transform}} using `DELETE _transform/index` neither the destin During the development of {{transforms}}, control was favoured over performance. In the design considerations, it is preferred for the {{transform}} to take longer to complete quietly in the background rather than to finish quickly and take precedence in resource consumption. -Composite aggregations are well suited for high cardinality data enabling pagination through results. If a [circuit breaker](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html) memory exception occurs when performing the composite aggregated search then we try again reducing the number of buckets requested. +Composite aggregations are well suited for high-cardinality data, enabling pagination through results. If a [circuit breaker](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/circuit-breaker-settings.md) memory exception occurs when performing the composite aggregated search, then we try again, reducing the number of buckets requested.
This circuit breaker is calculated based upon all activity within the cluster, not just activity from {{transforms}}, so it may only indicate a temporary resource availability issue. For a batch {{transform}}, the number of buckets requested is only ever adjusted downwards. Lowering this value may result in a longer duration for the {{transform}} checkpoint to complete. For {{ctransforms}}, the number of buckets requested is reset back to its default at the start of every checkpoint and it is possible for circuit breaker exceptions to occur repeatedly in the {{es}} logs. @@ -85,11 +85,11 @@ The {{transform}} retrieves data in batches which means it calculates several bu ### Handling dynamic adjustments for many terms [transform-dynamic-adjustments-limitations] -For each checkpoint, entities are identified that have changed since the last time the check was performed. This list of changed entities is supplied as a [terms query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html) to the {{transform}} composite aggregation, one page at a time. Then updates are applied to the destination index for each page of entities. +For each checkpoint, entities are identified that have changed since the last time the check was performed. This list of changed entities is supplied as a [terms query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-terms-query.md) to the {{transform}} composite aggregation, one page at a time. Then updates are applied to the destination index for each page of entities. The page `size` is defined by `max_page_search_size`, which is also used to define the number of buckets returned by the composite aggregation search. The default value is 500 and the minimum is 10. -The index setting [`index.max_terms_count`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-settings) defines the maximum number of terms that can be used in a terms query. The default value is 65536. If `max_page_search_size` exceeds `index.max_terms_count` the {{transform}} will fail. +The index setting [`index.max_terms_count`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#dynamic-index-settings) defines the maximum number of terms that can be used in a terms query. The default value is 65536. If `max_page_search_size` exceeds `index.max_terms_count`, the {{transform}} will fail. Using smaller values for `max_page_search_size` may result in a longer duration for the {{transform}} checkpoint to complete. @@ -109,7 +109,7 @@ If using a `sync.time.field` that represents the data ingest time and using a ze ### Support for date nanoseconds data type [transform-date-nanos] -If your data uses the [date nanosecond data type](https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html), aggregations are nonetheless on millisecond resolution. This limitation also affects the aggregations in your {{transforms}}. +If your data uses the [date nanosecond data type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date_nanos.md), aggregations are nonetheless on millisecond resolution. This limitation also affects the aggregations in your {{transforms}}.
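For reference, here is a minimal sketch of the kind of mapping this limitation applies to; the index and field names are illustrative only:

```console
PUT my-nanos-index
{
  "mappings": {
    "properties": {
      "@timestamp": {
        "type": "date_nanos"
      }
    }
  }
}
```

Even though `date_nanos` stores values with nanosecond precision, any date-based bucketing that a {{transform}} performs on such a field is carried out at millisecond resolution.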
### Data streams as destination indices are not supported [transform-data-streams-destination] @@ -119,7 +119,7 @@ If your data uses the [date nanosecond data type](https://www.elastic.co/guide/e It is not recommended to use [ILM](../../manage-data/lifecycle/index-lifecycle-management.md) as a {{transform}} destination index. {{transforms-cap}} update documents in the current destination, and cannot delete documents in the indices previously used by ILM. This may lead to duplicated documents when you use {{transforms}} combined with ILM in case of a rollover. -If you use ILM to have time-based indices, please consider using the [Date index name](https://www.elastic.co/guide/en/elasticsearch/reference/current/date-index-name-processor.html) instead. The processor works without duplicated documents if your {{transform}} contains a `group_by` based on `date_histogram`. +If you use ILM to have time-based indices, consider using the [Date index name](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/date-index-name-processor.md) processor instead. The processor works without duplicated documents if your {{transform}} contains a `group_by` based on `date_histogram`. ## Limitations in {{kib}} [transform-ui-limitations] diff --git a/explore-analyze/transforms/transform-painless-examples.md b/explore-analyze/transforms/transform-painless-examples.md index 6ef9b2a155..59881c7e8a 100644 --- a/explore-analyze/transforms/transform-painless-examples.md +++ b/explore-analyze/transforms/transform-painless-examples.md @@ -13,7 +13,7 @@ mapped_pages: The examples that use the `scripted_metric` aggregation are not supported on {{es}} Serverless. :::: -These examples demonstrate how to use Painless in {{transforms}}. You can learn more about the Painless scripting language in the [Painless guide](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-guide.html). +These examples demonstrate how to use Painless in {{transforms}}. You can learn more about the Painless scripting language in the [Painless guide](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless.md). * [Getting top hits by using scripted metric aggregation](#painless-top-hits) * [Getting time features by using aggregations](#painless-time-features) @@ -31,7 +31,7 @@ These examples demonstrate how to use Painless in {{transforms}}. You can learn ## Getting top hits by using scripted metric aggregation [painless-top-hits] -This snippet shows how to find the latest document, in other words the document with the latest timestamp. From a technical perspective, it helps to achieve the function of a [Top hits](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html) by using scripted metric aggregation in a {{transform}}, which provides a metric output. +This snippet shows how to find the latest document, in other words, the document with the latest timestamp. From a technical perspective, it achieves the function of a [Top hits](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-top-hits-aggregation.md) aggregation by using a scripted metric aggregation in a {{transform}}, which provides a metric output. ::::{important} This example uses a `scripted_metric` aggregation which is not supported on {{es}} Serverless. @@ -66,7 +66,7 @@ 3.
The `combine_script` returns `state` from each shard. 4. The `reduce_script` iterates through the value of `s.timestamp_latest` returned by each shard and returns the document with the latest timestamp (`last_doc`). In the response, the top hit (in other words, the `latest_doc`) is nested below the `latest_doc` field. -Check the [scope of scripts](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html#scripted-metric-aggregation-scope) for detailed explanation on the respective scripts. +Check the [scope of scripts](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md#scripted-metric-aggregation-scope) for a detailed explanation of the respective scripts. You can retrieve the last value in a similar way: @@ -215,7 +215,7 @@ This snippet shows how to extract time based features by using Painless in a {{t ## Getting duration by using bucket script [painless-bucket-script] -This example shows you how to get the duration of a session by client IP from a data log by using [bucket script](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html). The example uses the {{kib}} sample web logs dataset. +This example shows you how to get the duration of a session by client IP from a data log by using a [bucket script](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-bucket-script-aggregation.md). The example uses the {{kib}} sample web logs dataset. ```console PUT _transform/data_log diff --git a/explore-analyze/transforms/transform-scale.md b/explore-analyze/transforms/transform-scale.md index 9671fe8aed..4e8a21f475 100644 --- a/explore-analyze/transforms/transform-scale.md +++ b/explore-analyze/transforms/transform-scale.md @@ -55,7 +55,7 @@ Imagine your {{ctransform}} is configured to group by `IP` and calculate the sum To limit which historical indices are accessed, exclude certain tiers (for example, `"must_not": { "terms": { "_tier": [ "data_frozen", "data_cold" ] } }`) and/or use an absolute time value as a date range filter in your source query (for example, greater than 2024-01-01T00:00:00). If you use a relative time value (for example, gte now-30d/d) then ensure date rounding is applied to take advantage of query caching and ensure that the relative time is much larger than the largest of `frequency` or `time.sync.delay` or the date histogram bucket, otherwise data may be missed. Do not use date filters which are less than a date value (for example, `lt`: less than or `lte`: less than or equal to) as this conflicts with the logic applied at each checkpoint execution and data may be missed. -Consider using [date math](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#api-date-math-index-names) in your index names to reduce the number of indices to resolve in your queries. +Consider using [date math](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#api-date-math-index-names) in your index names to reduce the number of indices to resolve in your queries.
Add a date pattern - for example, `yyyy-MM-dd` - to your index names and use it to limit your query to a specific date. The example below queries indices only from yesterday and today: ```js "source": { @@ -88,7 +88,7 @@ Index sorting enables you to store documents on disk in a specific order which c ## 9. Disable the `_source` field on the destination index (storage) [disable-source-dest] -The [`_source` field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html) contains the original JSON document body that was passed at index time. The `_source` field itself is not indexed (and thus is not searchable), but it is still stored in the index and incurs a storage overhead. Consider disabling `_source` to save storage space if you have a large destination index. Disabling `_source` is only possible during index creation. +The [`_source` field](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md) contains the original JSON document body that was passed at index time. The `_source` field itself is not indexed (and thus is not searchable), but it is still stored in the index and incurs a storage overhead. Consider disabling `_source` to save storage space if you have a large destination index. Disabling `_source` is only possible during index creation. ::::{note} When the `_source` field is disabled, a number of features are not supported. Consult [Disabling the `_source` field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html#disable-source-field) to understand the consequences before disabling it. diff --git a/explore-analyze/transforms/transform-usage.md b/explore-analyze/transforms/transform-usage.md index c2ba2a4305..5b8bca5834 100644 --- a/explore-analyze/transforms/transform-usage.md +++ b/explore-analyze/transforms/transform-usage.md @@ -18,11 +18,11 @@ You might want to consider using {{transforms}} instead of aggregations when: In {{ml}}, you often need a complete set of behavioral features rather than just the top-N. For example, if you are predicting customer churn, you might look at features such as the number of website visits in the last week, the total number of sales, or the number of emails sent. The {{stack}} {{ml-features}} create models based on this multi-dimensional feature space, so they benefit from the full feature indices that are created by {{transforms}}. - This scenario also applies when you are trying to search across the results of an aggregation or multiple aggregations.
Aggregation results can be ordered or filtered, but there are [limitations to ordering](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md#search-aggregations-bucket-terms-aggregation-order) and [filtering by bucket selector](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-pipeline-bucket-selector-aggregation.md) is constrained by the maximum number of buckets returned. If you want to search all aggregation results, you need to create the complete {{dataframe}}. If you need to sort or filter the aggregation results by multiple fields, {{transforms}} are particularly useful. * You need to sort aggregation results by a pipeline aggregation. - [Pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline.html) cannot be used for sorting. Technically, this is because pipeline aggregations are run during the reduce phase after all other aggregations have already completed. If you create a {{transform}}, you can effectively perform multiple passes over the data. + [Pipeline aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/pipeline.md) cannot be used for sorting. Technically, this is because pipeline aggregations are run during the reduce phase after all other aggregations have already completed. If you create a {{transform}}, you can effectively perform multiple passes over the data. * You want to create summary tables to optimize queries. diff --git a/explore-analyze/visualize.md b/explore-analyze/visualize.md index a0911fb6af..1366463326 100644 --- a/explore-analyze/visualize.md +++ b/explore-analyze/visualize.md @@ -18,7 +18,7 @@ $$$panels-editors$$$ | --- | --- | --- | | Visualizations | | | | [Lens](visualize/lens.md) | The default editor for creating powerful [charts](visualize/supported-chart-types.md) in {{kib}} | -| [ES|QL](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-kibana.html) | Create visualizations from ES|QL queries | +| [ES|QL](/explore-analyze/query-filter/languages/esql-kibana.md) | Create visualizations from ES|QL queries | | [Maps](visualize/maps.md) | Create beautiful displays of your geographical data | | [Field statistics](visualize/field-statistics.md) | Add a field statistics view of your data to your dashboards | | [Custom visualizations](visualize/custom-visualizations-with-vega.md) | Use Vega to create new types of visualizations | @@ -32,7 +32,7 @@ $$$panels-editors$$$ | [Single metric viewer](machine-learning/machine-learning-in-kibana/xpack-ml-anomalies.md) | Display an anomaly chart from the **Single Metric Viewer** | | [Change point detection](machine-learning/machine-learning-in-kibana/xpack-ml-aiops.md#change-point-detection) | Display a chart to visualize change points in your data | | Observability | | | -| [SLO overview](https://www.elastic.co/guide/en/observability/current/slo.html) | Visualize a selected SLO’s health, including name, current SLI value, target, and status | +| [SLO overview](/solutions/observability/incident-management/service-level-objectives-slos.md) | Visualize a selected SLO’s health, including name, current SLI value, target, and status | | [SLO Alerts](https://www.elastic.co/guide/en/observability/current/slo.html) | Visualize one or more SLO alerts, including status, rule name, duration, and reason. 
In addition, configure and update alerts, or create cases directly from the panel | | [SLO Error Budget](https://www.elastic.co/guide/en/observability/current/slo.html) | Visualize the consumption of your SLO’s error budget | | Legacy | | | diff --git a/explore-analyze/visualize/canvas/canvas-tutorial.md b/explore-analyze/visualize/canvas/canvas-tutorial.md index 601674ecbf..735488757f 100644 --- a/explore-analyze/visualize/canvas/canvas-tutorial.md +++ b/explore-analyze/visualize/canvas/canvas-tutorial.md @@ -118,7 +118,7 @@ To focus your data on a specific time range, add the time filter. 2. Click **Display** 3. To use the date time field from the sample data, enter `order_date` in the **Column** field, then click **Set**. -% image doesn't exist (also broken in asciidoc https://www.elastic.co/guide/en/kibana/current/canvas-tutorial.html#_show_how_your_data_changes_over_time) +% image doesn't exist (also broken in asciidoc /explore-analyze/visualize/canvas/canvas-tutorial.md#_show_how_your_data_changes_over_time) % :::{image} ../../../images/kibana-canvas_tutorialCustomTimeFilter_7.17.0.png % :alt: Custom time filter added to the workpad @@ -136,6 +136,6 @@ Now that you know the basics, you’re ready to explore on your own. Here are some things to try: -* Play with the [sample Canvas workpads](https://www.elastic.co/guide/en/kibana/current/get-started.html). +* Play with the [sample Canvas workpads](/explore-analyze/index.md). * Build presentations of your own data with [workpads](../canvas.md#create-workpads). * Deep dive into the [expression language and functions](canvas-function-reference.md) that drive **Canvas**. diff --git a/explore-analyze/visualize/custom-visualizations-with-vega.md b/explore-analyze/visualize/custom-visualizations-with-vega.md index d7ddf7f674..d999bf5866 100644 --- a/explore-analyze/visualize/custom-visualizations-with-vega.md +++ b/explore-analyze/visualize/custom-visualizations-with-vega.md @@ -116,7 +116,7 @@ POST kibana_sample_data_ecommerce/_search } ``` -Add the [terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html), then click **Click to send request**: +Add the [terms aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md), then click **Click to send request**: ```js POST kibana_sample_data_ecommerce/_search diff --git a/explore-analyze/visualize/esorql.md b/explore-analyze/visualize/esorql.md index 78172334b0..5a8338c9a6 100644 --- a/explore-analyze/visualize/esorql.md +++ b/explore-analyze/visualize/esorql.md @@ -24,7 +24,7 @@ You can then **Save** and add it to an existing or a new dashboard using the sav 2. Choose **ES|QL** under **Visualizations**. An ES|QL editor appears and lets you configure your query and its associated visualization. The **Suggestions** panel can help you find alternative ways to configure the visualization. ::::{tip} - Check the [ES|QL reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-language.html) to get familiar with the syntax and optimize your query. + Check the [ES|QL reference](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql.md) to get familiar with the syntax and optimize your query. :::: 3. When editing your query or its configuration, run the query to update the preview of the visualization. 
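To get a feel for the syntax before building a visualization, here is a minimal sketch of an ES|QL query, shown through the `_query` API; it assumes the {{kib}} eCommerce sample data is installed, and the statistic and column names are illustrative only. The query between the triple quotes is what you would paste into the ES|QL editor:

```console
POST /_query
{
  "query": """
    FROM kibana_sample_data_ecommerce
    | STATS total_sales = SUM(taxless_total_price) BY category.keyword
    | SORT total_sales DESC
    | LIMIT 10
  """
}
```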
diff --git a/explore-analyze/visualize/field-statistics.md b/explore-analyze/visualize/field-statistics.md index aef78fb927..48e30af2ae 100644 --- a/explore-analyze/visualize/field-statistics.md +++ b/explore-analyze/visualize/field-statistics.md @@ -14,7 +14,7 @@ mapped_pages: 2. Choose **Field statistics** under **Visualizations**. An ES|QL editor appears and lets you configure your query with the fields and information that you want to show. ::::{tip} - Check the [ES|QL reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-language.html) to get familiar with the syntax and optimize your query. + Check the [ES|QL reference](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql.md) to get familiar with the syntax and optimize your query. :::: 3. When editing your query or its configuration, run the query to update the preview of the visualization. diff --git a/explore-analyze/visualize/graph/graph-troubleshooting.md b/explore-analyze/visualize/graph/graph-troubleshooting.md index 9f79dcc987..41844185f2 100644 --- a/explore-analyze/visualize/graph/graph-troubleshooting.md +++ b/explore-analyze/visualize/graph/graph-troubleshooting.md @@ -35,7 +35,7 @@ With the default setting of `use_significance` set to `true`, the Graph API perf If your data is noisy and you need to filter based on significance, you can reduce the number of frequency checks by: * Reducing the `sample_size`. Considering fewer documents can actually be better when the quality of matches is quite variable. -* Avoiding noisy documents that have a large number of terms. You can do this by either allowing ranking to naturally favor shorter documents in the top-results sample (see [enabling norms](https://www.elastic.co/guide/en/elasticsearch/reference/current/norms.html)) or by explicitly excluding large documents with your seed and guiding queries. +* Avoiding noisy documents that have a large number of terms. You can do this by either allowing ranking to naturally favor shorter documents in the top-results sample (see [enabling norms](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/norms.md)) or by explicitly excluding large documents with your seed and guiding queries. * Increasing the frequency threshold. Many terms occur very infrequently, so even increasing the frequency threshold by one can massively reduce the number of candidate terms whose background frequencies are checked. Keep in mind that all of these options reduce the scope of information analyzed and can increase the potential to miss what could be interesting details. However, the information that’s lost tends to be associated with lower-quality documents with lower-frequency terms, which can be an acceptable trade-off. diff --git a/explore-analyze/visualize/legacy-editors/timelion.md b/explore-analyze/visualize/legacy-editors/timelion.md index 82e37eb282..06bec73fcd 100644 --- a/explore-analyze/visualize/legacy-editors/timelion.md +++ b/explore-analyze/visualize/legacy-editors/timelion.md @@ -80,7 +80,7 @@ You collected data from your operating system using Metricbeat, and you want to Set up Metricbeat, then create the dashboard. -1.
To set up Metricbeat, go to [Metricbeat quick start: installation and configuration](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-installation-configuration.md). 2. Go to **Dashboards**. 3. On the **Dashboards** page, click **Create dashboard**. diff --git a/explore-analyze/visualize/legacy-editors/tsvb.md b/explore-analyze/visualize/legacy-editors/tsvb.md index 42412bc8a9..783965a7c1 100644 --- a/explore-analyze/visualize/legacy-editors/tsvb.md +++ b/explore-analyze/visualize/legacy-editors/tsvb.md @@ -37,7 +37,7 @@ When you use only {{data-sources}}, you are able to: ::::{important} :name: tsvb-index-patterns-mode -Creating **TSVB** visualizations with an {{es}} index string is deprecated and will be removed in a future release. By default, you create **TSVB** visualizations with only {{data-sources}}. To use an {{es}} index string, contact your administrator, or go to [Advanced Settings](https://www.elastic.co/guide/en/kibana/current/advanced-options.html) and set `metrics:allowStringIndices` to `true`. +Creating **TSVB** visualizations with an {{es}} index string is deprecated and will be removed in a future release. By default, you create **TSVB** visualizations with only {{data-sources}}. To use an {{es}} index string, contact your administrator, or go to [Advanced Settings](asciidocalypse://docs/kibana/docs/reference/advanced-settings.md) and set `metrics:allowStringIndices` to `true`. :::: diff --git a/explore-analyze/visualize/link-panels.md b/explore-analyze/visualize/link-panels.md index fee33b1052..f566d60c93 100644 --- a/explore-analyze/visualize/link-panels.md +++ b/explore-analyze/visualize/link-panels.md @@ -8,7 +8,7 @@ mapped_pages: # Link panels [dashboard-links] -You can use **Links** panels to create links to other dashboards or external websites. When creating links to other dashboards, you have the option to carry the time range, query, and filters to apply over to the linked dashboard. Links to external websites follow the [`externalUrl.policy`](https://www.elastic.co/guide/en/kibana/current/url-drilldown-settings-kb.html#external-URL-policy) settings. **Links** panels support vertical and horizontal layouts and may be saved to the **Library** for use in other dashboards. +You can use **Links** panels to create links to other dashboards or external websites. When creating links to other dashboards, you have the option to carry the time range, query, and filters over to the linked dashboard. Links to external websites follow the [`externalUrl.policy`](asciidocalypse://docs/kibana/docs/reference/configuration-reference/url-drilldown-settings.md#external-URL-policy) settings. **Links** panels support vertical and horizontal layouts and may be saved to the **Library** for use in other dashboards. :::{image} ../../images/kibana-dashboard_links_panel.png :alt: A screenshot displaying the new links panel diff --git a/explore-analyze/visualize/maps/asset-tracking-tutorial.md b/explore-analyze/visualize/maps/asset-tracking-tutorial.md index 769f867fd2..0b7afc7f86 100644 --- a/explore-analyze/visualize/maps/asset-tracking-tutorial.md +++ b/explore-analyze/visualize/maps/asset-tracking-tutorial.md @@ -32,7 +32,7 @@ When you complete this tutorial, you’ll have a map that looks like this: * If you don’t already have {{kib}}, sign up for [a free Elastic Cloud trial](https://www.elastic.co/cloud/elasticsearch-service/signup?baymax=docs-body&elektra=docs) and create a hosted deployment.
When creating it, download the deployment credentials. * Obtain an API key for [TriMet web services](https://developer.trimet.org/) at [https://developer.trimet.org/appid/registration/](https://developer.trimet.org/appid/registration/). -* [Fleet](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) is enabled on your cluster, and one or more [{{agent}}s](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html) is enrolled. +* [Fleet](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) is enabled on your cluster, and one or more [{{agent}}s](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md) are enrolled. ## Part 1: Ingest the Portland public transport data [_part_1_ingest_the_portland_public_transport_data] @@ -725,7 +725,7 @@ For this example, you will set the rule to check every minute. However, when run 16. Click **Save**. -The **TriMet Alerts connector** is added to the **{{connectors-ui}}** page. For more information on common connectors, refer to the [Slack](https://www.elastic.co/guide/en/kibana/current/slack-action-type.html) and [Email](https://www.elastic.co/guide/en/kibana/current/email-action-type.html) connectors. +The **TriMet Alerts connector** is added to the **{{connectors-ui}}** page. For more information on common connectors, refer to the [Slack](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/slack-action-type.md) and [Email](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/email-action-type.md) connectors. ### Step 3. View alerts in real time [_step_3_view_alerts_in_real_time] diff --git a/explore-analyze/visualize/maps/heatmap-layer.md b/explore-analyze/visualize/maps/heatmap-layer.md index db233e0ed4..c7d88e56c8 100644 --- a/explore-analyze/visualize/maps/heatmap-layer.md +++ b/explore-analyze/visualize/maps/heatmap-layer.md @@ -15,7 +15,7 @@ Heat map layers cluster point data to show locations with higher densities. :class: screenshot ::: -To add a heat map layer to your map, click **Add layer**, then select **Heat map**. The index must contain at least one field mapped as [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) or [geo_shape](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html). +To add a heat map layer to your map, click **Add layer**, then select **Heat map**. The index must contain at least one field mapped as [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) or [geo_shape](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md). ::::{note} Only the count, sum, and unique count metric aggregations are available with the grid aggregation source and heat map layers. Average, min, and max are turned off because the heat map will blend nearby values. Blending two average values would make the cluster more prominent, even though it just might literally mean that these nearby areas are average.
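For reference, here is a minimal sketch of an index mapping that satisfies this requirement; the index and field names are illustrative only:

```console
PUT tracks
{
  "mappings": {
    "properties": {
      "location": {
        "type": "geo_point"
      }
    }
  }
}
```

Documents indexed with a `location` value, for example `"41.12,-71.34"`, can then be clustered by the heat map layer.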
diff --git a/explore-analyze/visualize/maps/import-geospatial-data.md b/explore-analyze/visualize/maps/import-geospatial-data.md index fa12c1533c..9a5c3bef8c 100644 --- a/explore-analyze/visualize/maps/import-geospatial-data.md +++ b/explore-analyze/visualize/maps/import-geospatial-data.md @@ -8,7 +8,7 @@ mapped_pages: # Import geospatial data [import-geospatial-data] -To import geospatical data into the Elastic Stack, the data must be indexed as [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) or [geo_shape](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html). Geospatial data comes in many formats. Choose an import tool based on the format of your geospatial data. +To import geospatial data into the Elastic Stack, the data must be indexed as [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) or [geo_shape](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md). Geospatial data comes in many formats. Choose an import tool based on the format of your geospatial data. ## Security privileges [import-geospatial-privileges] @@ -114,7 +114,7 @@ To draw features: ## Upload data with IP addresses [_upload_data_with_ip_addresses] -The GeoIP processor adds information about the geographical location of IP addresses. See [GeoIP processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html) for details. For private IP addresses, see [Enriching data with GeoIPs from internal, private IP addresses](https://www.elastic.co/blog/enriching-elasticsearch-data-geo-ips-internal-private-ip-addresses). +The GeoIP processor adds information about the geographical location of IP addresses. See [GeoIP processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/geoip-processor.md) for details. For private IP addresses, see [Enriching data with GeoIPs from internal, private IP addresses](https://www.elastic.co/blog/enriching-elasticsearch-data-geo-ips-internal-private-ip-addresses). ## Upload data with GDAL [_upload_data_with_gdal] diff --git a/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md b/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md index 14b7a3c9c2..3a1e460dd6 100644 --- a/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md +++ b/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md @@ -50,7 +50,7 @@ For each GeoJSON file you downloaded, complete the following steps: 2. From the list of layer types, click **Upload file**. 3. Using the File Picker, upload the GeoJSON file. - Depending on the geometry type of your features, this will auto-populate **Index type** with either [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) or [geo_shape](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html) and **Index name** with ``. + Depending on the geometry type of your features, this will auto-populate **Index type** with either [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) or [geo_shape](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md) and **Index name** with ``. 4. Click **Import file**.
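As a sketch of the GeoIP enrichment described above, the following hypothetical pipeline derives a location from an `ip` field; the pipeline name and field names are illustrative only:

```console
PUT _ingest/pipeline/geoip-example
{
  "description": "Illustrative pipeline that derives a geo location from an IP address",
  "processors": [
    {
      "geoip": {
        "field": "ip",
        "target_field": "geo"
      }
    }
  ]
}
```

Documents indexed through this pipeline receive a `geo.location` property that can be mapped as `geo_point` and displayed in **Maps**.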
diff --git a/explore-analyze/visualize/maps/maps-create-filter-from-map.md b/explore-analyze/visualize/maps/maps-create-filter-from-map.md index 92f0b7bd36..c5ad13896b 100644 --- a/explore-analyze/visualize/maps/maps-create-filter-from-map.md +++ b/explore-analyze/visualize/maps/maps-create-filter-from-map.md @@ -35,7 +35,7 @@ A spatial filter narrows search results to documents that either intersect with, Spatial filters have the following properties: * **Geometry label** enables you to provide a meaningful name for your spatial filter. -* **Spatial relation** determines the [spatial relation operator](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-shape-query.html#geo-shape-spatial-relations) to use at search time. +* **Spatial relation** determines the [spatial relation operator](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-geo-shape-query.md#geo-shape-spatial-relations) to use at search time. * **Action** specifies whether to apply the filter to the current view or to a drilldown action. ::::{note} diff --git a/explore-analyze/visualize/maps/maps-grid-aggregation.md b/explore-analyze/visualize/maps/maps-grid-aggregation.md index 06291ead3b..5638e83811 100644 --- a/explore-analyze/visualize/maps/maps-grid-aggregation.md +++ b/explore-analyze/visualize/maps/maps-grid-aggregation.md @@ -8,7 +8,7 @@ mapped_pages: # Clusters [maps-grid-aggregation] -Clusters use [Geotile grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geotilegrid-aggregation.html) or [Geohex grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohexgrid-aggregation.html) to group your documents into grids. You can calculate metrics for each gridded cell. +Clusters use [Geotile grid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geotilegrid-aggregation.md) or [Geohex grid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geohexgrid-aggregation.md) to group your documents into grids. You can calculate metrics for each gridded cell. Symbolize cluster metrics as: diff --git a/explore-analyze/visualize/maps/maps-search-across-multiple-indices.md b/explore-analyze/visualize/maps/maps-search-across-multiple-indices.md index ee002ab8c2..24c642b899 100644 --- a/explore-analyze/visualize/maps/maps-search-across-multiple-indices.md +++ b/explore-analyze/visualize/maps/maps-search-across-multiple-indices.md @@ -20,9 +20,9 @@ One strategy for eliminating unintentional empty layers from a cross index searc ## Use _index in a search [maps-add-index-search] -Add [_index](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html) to your search to include documents from indices that do not contain a search field. +Add [_index](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-index-field.md) to your search to include documents from indices that do not contain a search field. -For example, suppose you have a vector layer showing the `kibana_sample_data_logs` documents and another vector layer with `kibana_sample_data_flights` documents. (See [adding sample data](https://www.elastic.co/guide/en/kibana/current/get-started.html) to install the `kibana_sample_data_logs` and `kibana_sample_data_flights` indices.) 
+For example, suppose you have a vector layer showing the `kibana_sample_data_logs` documents and another vector layer with `kibana_sample_data_flights` documents. (See [adding sample data](/explore-analyze/index.md) to install the `kibana_sample_data_logs` and `kibana_sample_data_flights` indices.) If you query for diff --git a/explore-analyze/visualize/maps/maps-top-hits-aggregation.md b/explore-analyze/visualize/maps/maps-top-hits-aggregation.md index 66aa15451f..05a9845714 100644 --- a/explore-analyze/visualize/maps/maps-top-hits-aggregation.md +++ b/explore-analyze/visualize/maps/maps-top-hits-aggregation.md @@ -8,7 +8,7 @@ mapped_pages: # Display the most relevant documents per entity [maps-top-hits-aggregation] -Use **Top hits per entity** to display the most relevant documents per entity, for example, the most recent GPS tracks per flight route. To get this data, {{es}} first groups your data using a [terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html), then accumulates the most relevant documents based on sort order for each entry using a [top hits metric aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html). +Use **Top hits per entity** to display the most relevant documents per entity, for example, the most recent GPS tracks per flight route. To get this data, {{es}} first groups your data using a [terms aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md), then accumulates the most relevant documents based on sort order for each entry using a [top hits metric aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-top-hits-aggregation.md). To enable top hits: diff --git a/explore-analyze/visualize/maps/maps-troubleshooting.md b/explore-analyze/visualize/maps/maps-troubleshooting.md index 25a6d0e078..b40ea002b9 100644 --- a/explore-analyze/visualize/maps/maps-troubleshooting.md +++ b/explore-analyze/visualize/maps/maps-troubleshooting.md @@ -35,7 +35,7 @@ Maps uses the [{{es}} vector tile search API](https://www.elastic.co/docs/api/do ### Data view not listed when adding layer [_data_view_not_listed_when_adding_layer] -* Verify your geospatial data is correctly mapped as [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) or [geo_shape](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html). +* Verify your geospatial data is correctly mapped as [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) or [geo_shape](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md). * Run `GET myIndexName/_field_caps?fields=myGeoFieldName` in [Console](../../query-filter/tools/console.md), replacing `myIndexName` and `myGeoFieldName` with your index and geospatial field name. * Ensure response specifies `type` as `geo_point` or `geo_shape`. diff --git a/explore-analyze/visualize/maps/point-to-point.md b/explore-analyze/visualize/maps/point-to-point.md index 80590c70ff..6f9b291338 100644 --- a/explore-analyze/visualize/maps/point-to-point.md +++ b/explore-analyze/visualize/maps/point-to-point.md @@ -10,7 +10,7 @@ mapped_pages: A point-to-point connection plots aggregated data paths between the source and the destination. 
Thicker, darker lines symbolize more connections between a source and destination, and thinner, lighter lines symbolize fewer connections. -Point to point uses an {{es}} [terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) to group your documents by destination. Then, a nested [GeoTile grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geotilegrid-aggregation.html) groups sources for each destination into grids. A line connects each source grid centroid to each destination. +Point-to-point uses an {{es}} [terms aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) to group your documents by destination. Then, a nested [GeoTile grid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geotilegrid-aggregation.md) groups sources for each destination into grids. A line connects each source grid centroid to each destination. Point-to-point layers are used in several common use cases: diff --git a/explore-analyze/visualize/maps/reverse-geocoding-tutorial.md b/explore-analyze/visualize/maps/reverse-geocoding-tutorial.md index 578dc6fec8..9edbb816a1 100644 --- a/explore-analyze/visualize/maps/reverse-geocoding-tutorial.md +++ b/explore-analyze/visualize/maps/reverse-geocoding-tutorial.md @@ -17,7 +17,7 @@ In this tutorial, you’ll use reverse geocoding to visualize United States Cens You’ll learn to: * Upload custom regions. -* Reverse geocode with the {{es}} [enrich processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-processor.html). +* Reverse geocode with the {{es}} [enrich processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/enrich-processor.md). * Create a map and visualize CSA regions by web traffic. When you complete this tutorial, you’ll have a map that looks like this: @@ -32,7 +32,7 @@ When you complete this tutorial, you’ll have a map that looks like this: GeoIP is a common way of transforming an IP address to a longitude and latitude. GeoIP is roughly accurate on the city level globally and neighborhood level in selected countries. It’s not as good as an actual GPS location from your phone, but it’s much more precise than just a country, state, or province. -You’ll use the [web logs sample data set](../../index.md#gs-get-data-into-kibana) that comes with Kibana for this tutorial. Web logs sample data set has longitude and latitude. If your web log data does not contain longitude and latitude, use [GeoIP processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html) to transform an IP address into a [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) field. +You’ll use the [web logs sample data set](../../index.md#gs-get-data-into-kibana) that comes with Kibana for this tutorial. The web logs sample data set has longitude and latitude. If your web log data does not contain longitude and latitude, use the [GeoIP processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/geoip-processor.md) to transform an IP address into a [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) field.
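For reference, a minimal ingest pipeline along these lines might look like the following sketch. The `client_ip` source field and `client.geo` target field are illustrative assumptions, not names from the tutorial’s data set:

```console
PUT _ingest/pipeline/geoip-example
{
  "description": "Sketch: derive geo data, including a geo_point location, from an IP address",
  "processors": [
    {
      "geoip": {
        "field": "client_ip",
        "target_field": "client.geo"
      }
    }
  ]
}
```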
## Step 2: Index Combined Statistical Area (CSA) regions [_step_2_index_combined_statistical_area_csa_regions] diff --git a/explore-analyze/visualize/maps/terms-join.md b/explore-analyze/visualize/maps/terms-join.md index 003b05bfcb..677ab890bc 100644 --- a/explore-analyze/visualize/maps/terms-join.md +++ b/explore-analyze/visualize/maps/terms-join.md @@ -62,7 +62,7 @@ In the following example, **iso2** property defines the shared key for the left The right source uses the Kibana sample data set "Sample web logs". In this data set, the **geo.src** field contains the ISO 3166-1 alpha-2 code of the country of origin. -A [terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html) groups the sample web log documents by **geo.src** and calculates metrics for each term. +A [terms aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-terms-aggregation.md) groups the sample web log documents by **geo.src** and calculates metrics for each term. The METRICS configuration defines two metric aggregations: diff --git a/explore-analyze/visualize/maps/vector-layer.md b/explore-analyze/visualize/maps/vector-layer.md index 5e2ce4e726..cc3aa1de22 100644 --- a/explore-analyze/visualize/maps/vector-layer.md +++ b/explore-analyze/visualize/maps/vector-layer.md @@ -21,7 +21,7 @@ To add a vector layer to your map, click **Add layer**, then select one of the f : Shaded areas to compare statistics across boundaries. **Clusters** -: Geospatial data grouped in grids with metrics for each gridded cell. The index must contain at least one field mapped as [geo_point](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) or [geo_shape](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html). +: Geospatial data grouped in grids with metrics for each gridded cell. The index must contain at least one field mapped as [geo_point](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) or [geo_shape](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-shape.md). **Create index** : Draw shapes on the map and index in Elasticsearch. @@ -32,7 +32,7 @@ To add a vector layer to your map, click **Add layer**, then select one of the f Results are limited to the `index.max_result_window` index setting, which defaults to 10000. Select the appropriate **Scaling** option for your use case. * **Limit results to 10,000** The layer displays features from the first `index.max_result_window` documents. Results exceeding `index.max_result_window` are not displayed. - * **Show clusters when results exceed 10,000** When results exceed `index.max_result_window`, the layer uses [GeoTile grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geotilegrid-aggregation.html) to group your documents into clusters and displays metrics for each cluster. When results are less then `index.max_result_window`, the layer displays features from individual documents. + * **Show clusters when results exceed 10,000** When results exceed `index.max_result_window`, the layer uses [GeoTile grid aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-geotilegrid-aggregation.md) to group your documents into clusters and displays metrics for each cluster. 
When results are less than `index.max_result_window`, the layer displays features from individual documents. * **Use vector tiles.** Vector tiles partition your map into tiles. Each tile request is limited to the `index.max_result_window` index setting. When a tile exceeds `index.max_result_window`, the results that exceed `index.max_result_window` are not included in the tile, and a dashed rectangle outlining the bounding box that contains all geo values within the tile is displayed. diff --git a/explore-analyze/visualize/maps/vector-style.md b/explore-analyze/visualize/maps/vector-style.md index 545f9e67d7..a6d853625c 100644 --- a/explore-analyze/visualize/maps/vector-style.md +++ b/explore-analyze/visualize/maps/vector-style.md @@ -15,7 +15,7 @@ When styling a vector layer, you can customize your data by property, such as si Use static styling to specify a constant value for a style property. -This image shows an example of static styling using the [Kibana sample web logs](https://www.elastic.co/guide/en/kibana/current/get-started.html) data set. The **kibana_sample_data_logs** layer uses static styling for all properties. +This image shows an example of static styling using the [Kibana sample web logs](/explore-analyze/index.md) data set. The **kibana_sample_data_logs** layer uses static styling for all properties. :::{image} ../../../images/kibana-vector_style_static.png :alt: vector style static diff --git a/explore-analyze/visualize/supported-chart-types.md b/explore-analyze/visualize/supported-chart-types.md index 7403b4446a..3ffbf0fabf 100644 --- a/explore-analyze/visualize/supported-chart-types.md +++ b/explore-analyze/visualize/supported-chart-types.md @@ -89,7 +89,7 @@ Metric aggregations are calculated from the values in the aggregated documents. | Value count | ✓ | | ✓ | ✓ | | Variance | ✓ | ✓ | | ✓ | -For information about {{es}} metrics aggregations, refer to [Metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html). +For information about {{es}} metrics aggregations, refer to [Metrics aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/metrics.md). ## Bucket aggregations [bucket-aggregations] @@ -110,7 +110,7 @@ Bucket aggregations group, or bucket, documents based on the aggregation type. T | Terms | ✓ | ✓ | ✓ | ✓ | | Significant terms | ✓ | | ✓ | ✓ | -For information about {{es}} bucket aggregations, refer to [Bucket aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html). +For information about {{es}} bucket aggregations, refer to [Bucket aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/bucket.md). ## Pipeline aggregations [pipeline-aggregations] @@ -130,5 +130,5 @@ Pipeline aggregations are dependent on the outputs calculated from other aggrega | Bucket selector | | | | ✓ | | Serial differencing | | ✓ | ✓ | ✓ | -For information about {{es}} pipeline aggregations, refer to [Pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline.html). +For information about {{es}} pipeline aggregations, refer to [Pipeline aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/pipeline.md).
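As a combined sketch of the three aggregation families, the following search request nests a metric aggregation (`avg`) inside a bucket aggregation (`terms`) and adds a sibling pipeline aggregation (`avg_bucket`). It assumes the Kibana sample web logs data set, with its `machine.os.keyword` and `bytes` fields:

```console
GET kibana_sample_data_logs/_search
{
  "size": 0,
  "aggs": {
    "per_os": {
      "terms": { "field": "machine.os.keyword" },
      "aggs": {
        "avg_bytes": { "avg": { "field": "bytes" } }
      }
    },
    "overall_avg_bytes": {
      "avg_bucket": { "buckets_path": "per_os>avg_bytes" }
    }
  }
}
```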
diff --git a/get-started/deployment-options.md b/get-started/deployment-options.md index 8b79fd9d3e..c5ee31b0f6 100644 --- a/get-started/deployment-options.md +++ b/get-started/deployment-options.md @@ -19,5 +19,5 @@ To use {{es}}, you need a running instance of the {{es}} service. You can deploy **Advanced options** * [**Self-managed**](../deploy-manage/deploy/self-managed/installing-elasticsearch.md#elasticsearch-deployment-options): Install, configure, and run {{es}} on your own premises. -* [**Elastic Cloud Enterprise**](https://www.elastic.co/guide/en/cloud-enterprise/current/Elastic-Cloud-Enterprise-overview.html): Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises. +* [**Elastic Cloud Enterprise**](/deploy-manage/deploy/cloud-enterprise.md): Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises. * [**Elastic Cloud on Kubernetes**](../deploy-manage/deploy/cloud-on-k8s.md): Deploy Elastic Cloud on Kubernetes. diff --git a/get-started/introduction.md b/get-started/introduction.md index b39e5479c2..cf0c033774 100644 --- a/get-started/introduction.md +++ b/get-started/introduction.md @@ -100,7 +100,7 @@ Don’t worry if you have hundreds of dashboards that need to be tagged. Use [** ## Secure {{kib}} [intro-kibana-Security] -{{kib}} offers a range of security features for you to control who has access to what. [Security is enabled automatically](../deploy-manage/security/security-certificates-keys.md) when you enroll {{kib}} with a secured {{es}} cluster. For a description of all available configuration options, refer to [Security settings in {{kib}}](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html). +{{kib}} offers a range of security features for you to control who has access to what. [Security is enabled automatically](../deploy-manage/security/security-certificates-keys.md) when you enroll {{kib}} with a secured {{es}} cluster. For a description of all available configuration options, refer to [Security settings in {{kib}}](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md). ### Log in [_log_in] diff --git a/get-started/the-stack.md b/get-started/the-stack.md index 984e142a61..1d28074eb9 100644 --- a/get-started/the-stack.md +++ b/get-started/the-stack.md @@ -34,7 +34,7 @@ $$$stack-components-agent$$$ {{fleet}} enables you to centrally manage {{agents}} and their policies. Use {{fleet}} to monitor the state of all your {{agents}}, manage agent policies, and upgrade {{agent}} binaries or integrations. - [Learn more about {{fleet}} and {{agent}}](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html). + [Learn more about {{fleet}} and {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md). $$$stack-components-apm$$$ @@ -45,17 +45,17 @@ APM $$$stack-components-beats$$$ {{beats}} -: {{beats}} are data shippers that you install as agents on your servers to send operational data to {{es}}. {{beats}} are available for many standard observability data scenarios, including audit data, log files and journals, cloud data, availability, metrics, network traffic, and Windows event logs. [Learn more about {{beats}}](https://www.elastic.co/guide/en/beats/libbeat/current/beats-reference.html). +: {{beats}} are data shippers that you install as agents on your servers to send operational data to {{es}}. 
{{beats}} are available for many standard observability data scenarios, including audit data, log files and journals, cloud data, availability, metrics, network traffic, and Windows event logs. [Learn more about {{beats}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/index.md). $$$stack-components-ingest-pipelines$$$ {{es}} ingest pipelines -: Ingest pipelines let you perform common transformations on your data before indexing them into {{es}}. You can configure one or more "processor" tasks to run sequentially, making specific changes to your documents before storing them in {{es}}. [Learn more about ingest pipelines](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html). +: Ingest pipelines let you perform common transformations on your data before indexing it into {{es}}. You can configure one or more "processor" tasks to run sequentially, making specific changes to your documents before storing them in {{es}}. [Learn more about ingest pipelines](/manage-data/ingest/transform-enrich/ingest-pipelines.md). $$$stack-components-logstash$$$ {{ls}} -: {{ls}} is a data collection engine with real-time pipelining capabilities. It can dynamically unify data from disparate sources and normalize the data into destinations of your choice. {{ls}} supports a broad array of input, filter, and output plugins, with many native codecs further simplifying the ingestion process. [Learn more about {{ls}}](https://www.elastic.co/guide/en/logstash/current/introduction.html). +: {{ls}} is a data collection engine with real-time pipelining capabilities. It can dynamically unify data from disparate sources and normalize the data into destinations of your choice. {{ls}} supports a broad array of input, filter, and output plugins, with many native codecs further simplifying the ingestion process. [Learn more about {{ls}}](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md). ### Store [_store] diff --git a/manage-data/data-store/aliases.md b/manage-data/data-store/aliases.md index 4d3773a55e..cebbc995fc 100644 --- a/manage-data/data-store/aliases.md +++ b/manage-data/data-store/aliases.md @@ -313,7 +313,7 @@ Filters are only applied when using the [Query DSL](../../explore-analyze/query- ## Routing [alias-routing] -Use the `routing` option to [route](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-routing-field.html) requests for an alias to a specific shard. This lets you take advantage of [shard caches](https://www.elastic.co/guide/en/elasticsearch/reference/current/shard-request-cache.html) to speed up searches. Data stream aliases do not support routing options. +Use the `routing` option to [route](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-routing-field.md) requests for an alias to a specific shard. This lets you take advantage of [shard caches](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/shard-request-cache-settings.md) to speed up searches. Data stream aliases do not support routing options.
```console POST _aliases diff --git a/manage-data/data-store/data-streams.md b/manage-data/data-store/data-streams.md index 36d9a75933..fdddeb792c 100644 --- a/manage-data/data-store/data-streams.md +++ b/manage-data/data-store/data-streams.md @@ -32,7 +32,7 @@ Keep in mind that some features such as [Time Series Data Streams (TSDS)](../dat ## Backing indices [backing-indices] -A data stream consists of one or more [hidden](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-hidden), auto-generated backing indices. +A data stream consists of one or more [hidden](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-hidden), auto-generated backing indices. :::{image} ../../images/elasticsearch-reference-data-streams-diagram.svg :alt: data streams diagram @@ -40,7 +40,7 @@ A data stream consists of one or more [hidden](https://www.elastic.co/guide/en/e A data stream requires a matching [index template](templates.md). The template contains the mappings and settings used to configure the stream’s backing indices. -Every document indexed to a data stream must contain a `@timestamp` field, mapped as a [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) or [`date_nanos`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html) field type. If the index template doesn’t specify a mapping for the `@timestamp` field, {{es}} maps `@timestamp` as a `date` field with default options. +Every document indexed to a data stream must contain a `@timestamp` field, mapped as a [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) or [`date_nanos`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date_nanos.md) field type. If the index template doesn’t specify a mapping for the `@timestamp` field, {{es}} maps `@timestamp` as a `date` field with default options. The same index template can be used for multiple data streams. You cannot delete an index template in use by a data stream. diff --git a/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md b/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md index 795c2e6a37..d0a443f0ed 100644 --- a/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md +++ b/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md @@ -83,7 +83,7 @@ POST /my-time-series-index/_downsample/my-downsampled-time-series-index } ``` -To downsample time series data as part of ILM, include a [Downsample action](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html) in your ILM policy and set `fixed_interval` to the level of granularity that you’d like: +To downsample time series data as part of ILM, include a [Downsample action](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md) in your ILM policy and set `fixed_interval` to the level of granularity that you’d like: ```console PUT _ilm/policy/my_policy @@ -115,7 +115,7 @@ The result of a time based histogram aggregation is in a uniform bucket size and There are a few things to note about querying downsampled indices: * When you run queries in {{kib}} and through Elastic solutions, a normal response is returned without notification that some of the queried indices are downsampled. 
-* For [date histogram aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html), only `fixed_intervals` (and not calendar-aware intervals) are supported. +* For [date histogram aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-datehistogram-aggregation.md), only `fixed_intervals` (and not calendar-aware intervals) are supported. * Timezone support comes with caveats: * Date histograms at intervals that are multiples of an hour are based on values generated at UTC. This works well for timezones that are on the hour, e.g. +5:00 or -3:00, but requires offsetting the reported time buckets, e.g. `2020-01-01T10:30:00.000` instead of `2020-01-01T10:00:00.000` for timezone +5:30 (India), if downsampling aggregates values per hour. In this case, the results include the field `downsampled_results_offset: true`, to indicate that the time buckets are shifted. This can be avoided if a downsampling interval of 15 minutes is used, as it allows properly calculating hourly values for the shifted buckets. @@ -135,7 +135,7 @@ The following restrictions and limitations apply for downsampling: * Downsampling data for the same period many times (downsampling of a downsampled index) is supported. The downsampling interval must be a multiple of the interval of the downsampled index. -* Downsampling is provided as an ILM action. See [Downsample](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html). +* Downsampling is provided as an ILM action. See [Downsample](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md). * The new, downsampled index is created on the data tier of the original index and it inherits its settings (for example, the number of shards and replicas). -* The numeric `gauge` and `counter` [metric types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-field-meta.html) are supported. +* The numeric `gauge` and `counter` [metric types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-field-meta.md) are supported. * The downsampling configuration is extracted from the time series data stream [index mapping](./set-up-tsds.md#create-tsds-index-template). The only additional required setting is the downsampling `fixed_interval`. diff --git a/manage-data/data-store/data-streams/logs-data-stream.md b/manage-data/data-store/data-streams/logs-data-stream.md index 37c9934310..784779f072 100644 --- a/manage-data/data-store/data-streams/logs-data-stream.md +++ b/manage-data/data-store/data-streams/logs-data-stream.md @@ -44,7 +44,7 @@ You can also set the index mode and adjust other template settings in [the Elast ## Synthetic source [logsdb-synthetic-source] -If you have the required [subscription](https://www.elastic.co/subscriptions), `logsdb` index mode uses [synthetic `_source`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html#synthetic-source), which omits storing the original `_source` field. Instead, the document source is synthesized from doc values or stored fields upon document retrieval. +If you have the required [subscription](https://www.elastic.co/subscriptions), `logsdb` index mode uses [synthetic `_source`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md#synthetic-source), which omits storing the original `_source` field. Instead, the document source is synthesized from doc values or stored fields upon document retrieval.
If you don’t have the required [subscription](https://www.elastic.co/subscriptions), `logsdb` mode uses the original `_source` field. @@ -65,7 +65,7 @@ In `logsdb` index mode, indices are sorted by the fields `host.name` and `@times * To prioritize the latest data, `host.name` is sorted in ascending order and `@timestamp` is sorted in descending order. -You can override the default sort settings by manually configuring `index.sort.field` and `index.sort.order`. For more details, see [*Index Sorting*](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-index-sorting.html). +You can override the default sort settings by manually configuring `index.sort.field` and `index.sort.order`. For more details, see [*Index Sorting*](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-sorting-settings.md). To modify the sort configuration of an existing data stream, update the data stream’s component templates, and then perform or wait for a [rollover](../data-streams.md#data-streams-rollover). @@ -83,7 +83,7 @@ To avoid mapping conflicts, consider these options: * **Adjust mappings:** Check your existing mappings to ensure that `host.name` is mapped as a keyword. * **Change sorting:** If needed, you can remove `host.name` from the sort settings and use a different set of fields. Sorting by `@timestamp` can be a good fallback. -* **Switch to a different [index mode](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-mode-setting)**: If resolving `host.name` mapping conflicts is not feasible, you can choose not to use `logsdb` mode. +* **Switch to a different [index mode](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-mode-setting)**: If resolving `host.name` mapping conflicts is not feasible, you can choose not to use `logsdb` mode. ::::{important} On existing data streams, `logsdb` mode is applied on [rollover](../data-streams.md#data-streams-rollover) (automatic or manual). @@ -101,7 +101,7 @@ To configure a routing optimization: * Include the index setting `[index.logsdb.route_on_sort_fields:true]` in the data stream configuration. -* [Configure index sorting](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-index-sorting.html) with two or more fields, in addition to `@timestamp`. -* Make sure the [`_id`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-id-field.html) field is not populated in ingested documents. It should be auto-generated instead. +* [Configure index sorting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-sorting-settings.md) with two or more fields, in addition to `@timestamp`. +* Make sure the [`_id`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-id-field.md) field is not populated in ingested documents. It should be auto-generated instead. A custom sort configuration is required to improve storage efficiency and to minimize hotspots from logging spikes that may route documents to a single shard. For best results, use a few sort fields that have a relatively low cardinality and don’t co-vary (for example, `host.name` and `host.id` are not optimal).
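Taken together, a sketch of an index template that applies this optimization might look like the following. The index pattern and sort fields here are illustrative assumptions; choose low-cardinality fields that suit your own data:

```console
PUT _index_template/logsdb-routing-example
{
  "index_patterns": ["my-logs-*"],
  "data_stream": {},
  "template": {
    "settings": {
      "index.mode": "logsdb",
      "index.logsdb.route_on_sort_fields": true,
      "index.sort.field": ["host.name", "service.name", "@timestamp"],
      "index.sort.order": ["asc", "asc", "desc"]
    }
  }
}
```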
diff --git a/manage-data/data-store/data-streams/modify-data-stream.md b/manage-data/data-store/data-streams/modify-data-stream.md index 6385d1d85e..2596a7eabb 100644 --- a/manage-data/data-store/data-streams/modify-data-stream.md +++ b/manage-data/data-store/data-streams/modify-data-stream.md @@ -20,7 +20,7 @@ If you later need to change the mappings or settings for a data stream, you have * [Change a static index setting for a data stream](../data-streams/modify-data-stream.md#change-static-index-setting-for-a-data-stream) ::::{tip} -If your changes include modifications to existing field mappings or [static index settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-modules-settings), a reindex is often required to apply the changes to a data stream’s backing indices. If you are already performing a reindex, you can use the same process to add new field mappings and change [dynamic index settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-modules-settings). See [Use reindex to change mappings or settings](../data-streams/modify-data-stream.md#data-streams-use-reindex-to-change-mappings-settings). +If your changes include modifications to existing field mappings or [static index settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-modules-settings), a reindex is often required to apply the changes to a data stream’s backing indices. If you are already performing a reindex, you can use the same process to add new field mappings and change [dynamic index settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-modules-settings). See [Use reindex to change mappings or settings](../data-streams/modify-data-stream.md#data-streams-use-reindex-to-change-mappings-settings). :::: @@ -89,13 +89,13 @@ To add a mapping for a new field to a data stream, following these steps: ### Change an existing field mapping in a data stream [change-existing-field-mapping-in-a-data-stream] -The documentation for each [mapping parameter](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html) indicates whether you can update it for an existing field using the [update mapping API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping). To update these parameters for an existing field, follow these steps: +The documentation for each [mapping parameter](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-parameters.md) indicates whether you can update it for an existing field using the [update mapping API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping). To update these parameters for an existing field, follow these steps: 1. Update the index template used by the data stream. This ensures the updated field mapping is added to future backing indices created for the stream. For example, `my-data-stream-template` is an existing index template used by `my-data-stream`. - The following [create or update index template](../templates.md) request changes the argument for the `host.ip` field’s [`ignore_malformed`](https://www.elastic.co/guide/en/elasticsearch/reference/current/ignore-malformed.html) mapping parameter to `true`.
+ The following [create or update index template](../templates.md) request changes the argument for the `host.ip` field’s [`ignore_malformed`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/ignore-malformed.md) mapping parameter to `true`. ```console PUT /_index_template/my-data-stream-template @@ -423,7 +423,7 @@ Follow these steps: You can also use a query to reindex only a subset of documents with each request. - The following [reindex API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) request copies documents from `my-data-stream` to `new-data-stream`. The request uses a [`range` query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html) to only reindex documents with a timestamp within the last week. Note the request’s `op_type` is `create`. + The following [reindex API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) request copies documents from `my-data-stream` to `new-data-stream`. The request uses a [`range` query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-range-query.md) to only reindex documents with a timestamp within the last week. Note the request’s `op_type` is `create`. ```console POST /_reindex diff --git a/manage-data/data-store/data-streams/reindex-tsds.md b/manage-data/data-store/data-streams/reindex-tsds.md index af2a9c5a8a..4ea1e1bc2f 100644 --- a/manage-data/data-store/data-streams/reindex-tsds.md +++ b/manage-data/data-store/data-streams/reindex-tsds.md @@ -264,5 +264,5 @@ POST /k9s/_rollover/ This creates a new backing index with the updated index settings. The destination data stream is now ready to accept new documents. -Note that the initial backing index can still accept documents within the range of timestamps derived from the source data stream. If this is not desired, mark it as [read-only](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html#index-blocks-read-only) explicitly. +Note that the initial backing index can still accept documents within the range of timestamps derived from the source data stream. If this is not desired, mark it as [read-only](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-block-settings.md#index-blocks-read-only) explicitly. diff --git a/manage-data/data-store/data-streams/run-downsampling-using-data-stream-lifecycle.md b/manage-data/data-store/data-streams/run-downsampling-using-data-stream-lifecycle.md index eccbb9ec57..d416a1e613 100644 --- a/manage-data/data-store/data-streams/run-downsampling-using-data-stream-lifecycle.md +++ b/manage-data/data-store/data-streams/run-downsampling-using-data-stream-lifecycle.md @@ -314,7 +314,7 @@ POST /datastream/_rollover/ ## View downsampling results [downsampling-dsl-view-results] -By default, data stream lifecycle actions are executed every five minutes. Downsampling takes place after the index is rolled over and the [index time series end time](https://www.elastic.co/guide/en/elasticsearch/reference/current/tsds-index-settings.html#index-time-series-end-time) has lapsed as the source index is still expected to receive major writes until then. Index is now rolled over after previous step but its time series range end is likely still in the future. Once index time series range is in the past, re-run the `GET _data_stream` request. +By default, data stream lifecycle actions are executed every five minutes. 
Downsampling takes place after the index is rolled over and the [index time series end time](https://www.elastic.co/guide/en/elasticsearch/reference/current/tsds-index-settings.html#index-time-series-end-time) has lapsed as the source index is still expected to receive major writes until then. Index is now rolled over after previous step but its time series range end is likely still in the future. Once index time series range is in the past, re-run the `GET _data_stream` request. +By default, data stream lifecycle actions are executed every five minutes. Downsampling takes place after the index is rolled over and the [index time series end time](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/time-series-index-settings.md#index-time-series-end-time) has lapsed, as the source index is still expected to receive writes until then. The index is now rolled over after the previous step, but its time series range end is likely still in the future. Once the index’s time series range is in the past, re-run the `GET _data_stream` request. ```console GET _data_stream diff --git a/manage-data/data-store/data-streams/run-downsampling-with-ilm.md b/manage-data/data-store/data-streams/run-downsampling-with-ilm.md index 8161b333f3..1fc7bcf430 100644 --- a/manage-data/data-store/data-streams/run-downsampling-with-ilm.md +++ b/manage-data/data-store/data-streams/run-downsampling-with-ilm.md @@ -29,9 +29,9 @@ Before running this example you may want to try the [Run downsampling manually]( Create an ILM policy for your time series data. While not required, an ILM policy is recommended to automate the management of your time series data stream indices. -To enable downsampling, add a [Downsample action](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html) and set [`fixed_interval`](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html#ilm-downsample-options) to the downsampling interval at which you want to aggregate the original time series data. +To enable downsampling, add a [Downsample action](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md) and set [`fixed_interval`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md#ilm-downsample-options) to the downsampling interval at which you want to aggregate the original time series data. -In this example, an ILM policy is configured for the `hot` phase. The downsample takes place after the index is rolled over and the [index time series end time](https://www.elastic.co/guide/en/elasticsearch/reference/current/tsds-index-settings.html#index-time-series-end-time) has lapsed as the source index is still expected to receive major writes until then. {{ilm-cap}} will not proceed with any action that expects the index to not receive writes anymore until the [index’s end time](https://www.elastic.co/guide/en/elasticsearch/reference/current/tsds-index-settings.html#index-time-series-end-time) has passed. The {{ilm-cap}} actions that wait on the end time before proceeding are: - [Delete](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete.html) - [Downsample](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html) - [Force merge](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-forcemerge.html) - [Read only](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-readonly.html) - [Searchable snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-searchable-snapshot.html) - [Shrink](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-shrink.html) +In this example, an ILM policy is configured for the `hot` phase. The downsample takes place after the index is rolled over and the [index time series end time](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/time-series-index-settings.md#index-time-series-end-time) has lapsed, as the source index is still expected to receive writes until then.
{{ilm-cap}} will not proceed with any action that expects the index to receive no further writes until the [index’s end time](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/time-series-index-settings.md#index-time-series-end-time) has passed. The {{ilm-cap}} actions that wait on the end time before proceeding are: - [Delete](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-delete.md) - [Downsample](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md) - [Force merge](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-forcemerge.md) - [Read only](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-readonly.md) - [Searchable snapshot](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-searchable-snapshot.md) - [Shrink](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-shrink.md) ```console PUT _ilm/policy/datastream_policy diff --git a/manage-data/data-store/data-streams/set-up-data-stream.md b/manage-data/data-store/data-streams/set-up-data-stream.md index db7836e9a7..efd01f97d9 100644 --- a/manage-data/data-store/data-streams/set-up-data-stream.md +++ b/manage-data/data-store/data-streams/set-up-data-stream.md @@ -18,7 +18,7 @@ You can also [convert an index alias to a data stream](#convert-index-alias-to-d ::::{important} If you use {{fleet}}, {{agent}}, or {{ls}}, skip this tutorial. They all set up data streams for you. -For {{fleet}} and {{agent}}, check out this [data streams documentation](https://www.elastic.co/guide/en/fleet/current/data-streams.html). For {{ls}}, check out the [data streams settings](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html#plugins-outputs-elasticsearch-data_stream) for the `elasticsearch output` plugin. +For {{fleet}} and {{agent}}, check out this [data streams documentation](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/data-streams.md). For {{ls}}, check out the [data streams settings](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-data_stream) for the `elasticsearch output` plugin. :::: @@ -89,13 +89,13 @@ A data stream requires a matching index template. In most cases, you compose thi When creating your component templates, include: -* A [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) or [`date_nanos`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html) mapping for the `@timestamp` field. If you don’t specify a mapping, {{es}} maps `@timestamp` as a `date` field with default options. +* A [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) or [`date_nanos`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date_nanos.md) mapping for the `@timestamp` field. If you don’t specify a mapping, {{es}} maps `@timestamp` as a `date` field with default options. * Your lifecycle policy in the `index.lifecycle.name` index setting. ::::{tip} Use the [Elastic Common Schema (ECS)](https://www.elastic.co/guide/en/ecs/current) when mapping your fields. ECS fields integrate with several {{stack}} features by default.
-If you’re unsure how to map your fields, use [runtime fields](../mapping/define-runtime-fields-in-search-request.md) to extract fields from [unstructured content](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#mapping-unstructured-content) at search time. For example, you can index a log message to a `wildcard` field and later extract IP addresses and other data from this field during a search. +If you’re unsure how to map your fields, use [runtime fields](../mapping/define-runtime-fields-in-search-request.md) to extract fields from [unstructured content](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md#mapping-unstructured-content) at search time. For example, you can index a log message to a `wildcard` field and later extract IP addresses and other data from this field during a search. :::: diff --git a/manage-data/data-store/data-streams/set-up-tsds.md b/manage-data/data-store/data-streams/set-up-tsds.md index 7af4637186..66a3ed10e0 100644 --- a/manage-data/data-store/data-streams/set-up-tsds.md +++ b/manage-data/data-store/data-streams/set-up-tsds.md @@ -90,11 +90,11 @@ PUT _ilm/policy/my-weather-sensor-lifecycle-policy To set up a TSDS, create an index template with the following details: -* One or more index patterns that match the TSDS’s name. We recommend using our [data stream naming scheme](https://www.elastic.co/guide/en/fleet/current/data-streams.html#data-streams-naming-scheme). +* One or more index patterns that match the TSDS’s name. We recommend using our [data stream naming scheme](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/data-streams.md#data-streams-naming-scheme). * Enable data streams. * Specify a mapping that defines your dimensions and metrics: - * One or more [dimension fields](time-series-data-stream-tsds.md#time-series-dimension) with a `time_series_dimension` value of `true`. Alternatively, one or more [pass-through](https://www.elastic.co/guide/en/elasticsearch/reference/current/passthrough.html#passthrough-dimensions) fields configured as dimension containers, provided that they will contain at least one sub-field (mapped statically or dynamically). + * One or more [dimension fields](time-series-data-stream-tsds.md#time-series-dimension) with a `time_series_dimension` value of `true`. Alternatively, one or more [pass-through](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/passthrough.md#passthrough-dimensions) fields configured as dimension containers, provided that they will contain at least one sub-field (mapped statically or dynamically). * One or more [metric fields](time-series-data-stream-tsds.md#time-series-metric), marked using the `time_series_metric` mapping parameter. * Optional: A `date` or `date_nanos` mapping for the `@timestamp` field. If you don’t specify a mapping, Elasticsearch maps `@timestamp` as a `date` field with default options. @@ -102,7 +102,7 @@ To setup a TSDS create an index template with the following details: * Set the `index.mode` setting to `time_series`. * Your lifecycle policy in the `index.lifecycle.name` index setting. - * Optional: Other index settings, such as [`index.number_of_replicas`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-number-of-replicas), for your TSDS’s backing indices.
+ * Optional: Other index settings, such as [`index.number_of_replicas`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#dynamic-index-number-of-replicas), for your TSDS’s backing indices. * A priority higher than `200` to avoid collisions with built-in templates. See [Avoid index pattern collisions](../templates.md#avoid-index-pattern-collisions). * Optional: Component templates containing your mappings and other index settings. diff --git a/manage-data/data-store/data-streams/time-series-data-stream-tsds.md b/manage-data/data-store/data-streams/time-series-data-stream-tsds.md index a4723873a4..748089709a 100644 --- a/manage-data/data-store/data-streams/time-series-data-stream-tsds.md +++ b/manage-data/data-store/data-streams/time-series-data-stream-tsds.md @@ -29,9 +29,9 @@ A TSDS works like a regular data stream with some key differences: * {{es}} generates a hidden [`_tsid`](#tsid) metadata field for each document in a TSDS. * A TSDS uses [time-bound backing indices](#time-bound-indices) to store data from the same time period in the same backing index. * The matching index template for a TSDS must contain the `index.routing_path` index setting. A TSDS uses this setting to perform [dimension-based routing](#dimension-based-routing). -* A TSDS uses internal [index sorting](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-index-sorting.html) to order shard segments by `_tsid` and `@timestamp`. +* A TSDS uses internal [index sorting](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index-sorting-settings.md) to order shard segments by `_tsid` and `@timestamp`. * TSDS documents only support auto-generated document `_id` values. For TSDS documents, the document `_id` is a hash of the document’s dimensions and `@timestamp`. A TSDS doesn’t support custom document `_id` values. -* A TSDS uses [synthetic `_source`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html#synthetic-source), and as a result is subject to some [restrictions](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html#synthetic-source-restrictions) and [modifications](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html#synthetic-source-modifications) applied to the `_source` field. +* A TSDS uses [synthetic `_source`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md#synthetic-source), and as a result is subject to some [restrictions](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md#synthetic-source-restrictions) and [modifications](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md#synthetic-source-modifications) applied to the `_source` field. ::::{note} A time series index can contain fields other than dimensions or metrics. :::: @@ -63,18 +63,18 @@ A TSDS document is uniquely identified by its time series and timestamp, both of You mark a field as a dimension using the boolean `time_series_dimension` mapping parameter.
The following field types support the `time_series_dimension` parameter: -* [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#keyword-field-type) -* [`ip`](https://www.elastic.co/guide/en/elasticsearch/reference/current/ip.html) -* [`byte`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) -* [`short`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) -* [`integer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) -* [`long`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) -* [`unsigned_long`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) -* [`boolean`](https://www.elastic.co/guide/en/elasticsearch/reference/current/boolean.html) +* [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md#keyword-field-type) +* [`ip`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/ip.md) +* [`byte`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) +* [`short`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) +* [`integer`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) +* [`long`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) +* [`unsigned_long`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) +* [`boolean`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/boolean.md) -For a flattened field, use the `time_series_dimensions` parameter to configure an array of fields as dimensions. For details refer to [`flattened`](https://www.elastic.co/guide/en/elasticsearch/reference/current/flattened.html#flattened-params). +For a flattened field, use the `time_series_dimensions` parameter to configure an array of fields as dimensions. For details refer to [`flattened`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/flattened.md#flattened-params). -Dimension definitions can be simplified through [pass-through](https://www.elastic.co/guide/en/elasticsearch/reference/current/passthrough.html#passthrough-dimensions) fields. +Dimension definitions can be simplified through [pass-through](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/passthrough.md#passthrough-dimensions) fields. ### Metrics [time-series-metric] @@ -85,8 +85,8 @@ Metrics differ from dimensions in that while dimensions generally remain constan To mark a field as a metric, you must specify a metric type using the `time_series_metric` mapping parameter. The following field types support the `time_series_metric` parameter: -* [`aggregate_metric_double`](https://www.elastic.co/guide/en/elasticsearch/reference/current/aggregate-metric-double.html) -* [`histogram`](https://www.elastic.co/guide/en/elasticsearch/reference/current/histogram.html) -* All [numeric field types](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) +* [`aggregate_metric_double`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/aggregate-metric-double.md) +* [`histogram`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/histogram.md) +* All [numeric field types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) Accepted metric types vary based on the field type: @@ -120,7 +120,7 @@ Due to the cumulative nature of counter fields, the following aggregations are s ## Time series mode [time-series-mode] -The matching index template for a TSDS must contain a `data_stream` object with the `index_mode: time_series` option. This option ensures the TSDS creates backing indices with an [`index.mode`](https://www.elastic.co/guide/en/elasticsearch/reference/current/tsds-index-settings.html#index-mode) setting of `time_series`.
This setting enables most TSDS-related functionality in the backing indices. +The matching index template for a TSDS must contain a `data_stream` object with the `index_mode: time_series` option. This option ensures the TSDS creates backing indices with an [`index.mode`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/time-series-index-settings.md#index-mode) setting of `time_series`. This setting enables most TSDS-related functionality in the backing indices. If you convert an existing data stream to a TSDS, only backing indices created after the conversion have an `index.mode` of `time_series`. You can’t change the `index.mode` of an existing backing index. @@ -129,7 +129,7 @@ If you convert an existing data stream to a TSDS, only backing indices created a When you add a document to a TSDS, {{es}} automatically generates a `_tsid` metadata field for the document. The `_tsid` is an object containing the document’s dimensions. Documents in the same TSDS with the same `_tsid` are part of the same time series. -The `_tsid` field is not queryable or updatable. You also can’t retrieve a document’s `_tsid` using a [get document](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) request. However, you can use the `_tsid` field in aggregations and retrieve the `_tsid` value in searches using the [`fields` parameter](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html#search-fields-param). +The `_tsid` field is not queryable or updatable. You also can’t retrieve a document’s `_tsid` using a [get document](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) request. However, you can use the `_tsid` field in aggregations and retrieve the `_tsid` value in searches using the [`fields` parameter](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md#search-fields-param). ::::{warning} The format of the `_tsid` field shouldn’t be relied upon. It may change from version to version. @@ -148,7 +148,7 @@ When you add a document to a TSDS, {{es}} adds the document to the appropriate b ::: ::::{tip} -Some {{ilm-init}} actions mark the source index as read-only, or expect the index to not be actively written anymore in order to provide good performance. These actions are: - [Delete](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete.html) - [Downsample](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html) - [Force merge](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-forcemerge.html) - [Read only](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-readonly.html) - [Searchable snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-searchable-snapshot.html) - [Shrink](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-shrink.html) {{ilm-cap}} will **not** proceed with executing these actions until the upper time-bound for accepting writes, represented by the [`index.time_series.end_time`](https://www.elastic.co/guide/en/elasticsearch/reference/current/tsds-index-settings.html#index-time-series-end-time) index setting, has lapsed. +Some {{ilm-init}} actions mark the source index as read-only, or expect the index to not be actively written anymore in order to provide good performance. 
These actions are: - [Delete](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-delete.md) - [Downsample](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md) - [Force merge](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-forcemerge.md) - [Read only](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-readonly.md) - [Searchable snapshot](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-searchable-snapshot.md) - [Shrink](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-shrink.md) {{ilm-cap}} will **not** proceed with executing these actions until the upper time-bound for accepting writes, represented by the [`index.time_series.end_time`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/time-series-index-settings.md#index-time-series-end-time) index setting, has lapsed. :::: @@ -206,7 +206,7 @@ TSDS documents don’t support a custom `_routing` value. Similarly, you can’t ### Index sorting [tsds-index-sorting] -{{es}} uses [compression algorithms](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-codec) to compress repeated values. This compression works best when repeated values are stored near each other — in the same index, on the same shard, and side-by-side in the same shard segment. +{{es}} uses [compression algorithms](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-codec) to compress repeated values. This compression works best when repeated values are stored near each other — in the same index, on the same shard, and side-by-side in the same shard segment. Most time series data contains repeated values. Dimensions are repeated across documents in the same time series. The metric values of a time series may also change slowly over time. diff --git a/manage-data/data-store/data-streams/use-data-stream.md b/manage-data/data-store/data-streams/use-data-stream.md index 14ccb8cff3..311837a136 100644 --- a/manage-data/data-store/data-streams/use-data-stream.md +++ b/manage-data/data-store/data-streams/use-data-stream.md @@ -165,9 +165,9 @@ POST /my-data-stream/_delete_by_query If needed, you can update or delete documents in a data stream by sending requests to the backing index containing the document.
You’ll need: -* The [document ID](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-id-field.html) +* The [document ID](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-id-field.md) * The name of the backing index containing the document -* If updating the document, its [sequence number and primary term](https://www.elastic.co/guide/en/elasticsearch/reference/current/optimistic-concurrency-control.html) +* If updating the document, its [sequence number and primary term](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/optimistic-concurrency-control.md) To get this information, use a [search request](#search-a-data-stream): diff --git a/manage-data/data-store/index-basics.md b/manage-data/data-store/index-basics.md index ddadfdc602..60d4148d73 100644 --- a/manage-data/data-store/index-basics.md +++ b/manage-data/data-store/index-basics.md @@ -52,7 +52,7 @@ A simple {{es}} document might look like this: ### Metadata fields [elasticsearch-intro-documents-fields-data-metadata] -An indexed document contains data and metadata. [Metadata fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-fields.html) are system fields that store information about the documents. In {{es}}, metadata fields are prefixed with an underscore. For example, the following fields are metadata fields: +An indexed document contains data and metadata. [Metadata fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/document-metadata-fields.md) are system fields that store information about the documents. In {{es}}, metadata fields are prefixed with an underscore. For example, the following fields are metadata fields: * `_index`: The name of the index where the document is stored. * `_id`: The document’s ID. IDs must be unique per index. @@ -60,7 +60,7 @@ An indexed document contains data and metadata. [Metadata fields](https://www.el ### Mappings and data types [elasticsearch-intro-documents-fields-mappings] -Each index has a [mapping](/manage-data/data-store/mapping.md) or schema for how the fields in your documents are indexed. A mapping defines the [data type](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html) for each field, how the field should be indexed, and how it should be stored. +Each index has a [mapping](/manage-data/data-store/mapping.md) or schema for how the fields in your documents are indexed. A mapping defines the [data type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md) for each field, how the field should be indexed, and how it should be stored. ## Index management @@ -79,7 +79,7 @@ Investigate your indices and perform operations from the **Indices** view. * To show details and perform operations, click the index name. To perform operations on multiple indices, select their checkboxes and then open the **Manage** menu. For more information on managing indices, refer to [Index APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html). * To filter the list of indices, use the search bar or click a badge. 
Badges indicate if an index is a [follower index](https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html), a [rollup index](https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html), or [frozen](https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html). -* To drill down into the index [mappings](/manage-data/data-store/mapping.md), [settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-modules-settings), and statistics, click an index name. From this view, you can navigate to **Discover** to further explore the documents in the index. +* To drill down into the index [mappings](/manage-data/data-store/mapping.md), [settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-modules-settings), and statistics, click an index name. From this view, you can navigate to **Discover** to further explore the documents in the index. * To create new indices, use the **Create index** wizard. ### Manage data streams @@ -132,7 +132,7 @@ Create, edit, clone, and delete your component templates in the **Component Temp ### Manage enrich policies -An [enrich policy](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest-enriching-data.html#enrich-policy) is a set of configuration options used to add the right enrich data to the right incoming documents. +An [enrich policy](/manage-data/ingest/transform-enrich/data-enrichment.md#enrich-policy) is a set of configuration options used to add the right enrich data to the right incoming documents. Add data from your existing indices to incoming documents using the **Enrich Policies** view. diff --git a/manage-data/data-store/mapping.md b/manage-data/data-store/mapping.md index 5c53a96f79..fd37255c04 100644 --- a/manage-data/data-store/mapping.md +++ b/manage-data/data-store/mapping.md @@ -32,7 +32,7 @@ $$$mapping-explicit$$$ Mapping is the process of defining how a document and the fields it contains are stored and indexed. -Each document is a collection of fields, which each have their own [data type](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html). When mapping your data, you create a mapping definition, which contains a list of fields that are pertinent to the document. A mapping definition also includes [metadata fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-fields.html), like the `_source` field, which customize how a document’s associated metadata is handled. +Each document is a collection of fields, which each have their own [data type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md). When mapping your data, you create a mapping definition, which contains a list of fields that are pertinent to the document. A mapping definition also includes [metadata fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/document-metadata-fields.md), like the `_source` field, which customize how a document’s associated metadata is handled. Depending on where you are in your data journey, use *dynamic mapping* and *explicit mapping* to define your data. For example, you can explicitly map fields where you don’t want to use the defaults, or to gain greater control over which fields are created. Then you can allow {{es}} to dynamically map other fields. 
Using a combination of dynamic and explicit mapping on the same index is especially useful when you have a mix of known and unknown fields in your data. @@ -42,7 +42,7 @@ Before 7.0.0, the mapping definition included a type name. {{es}} 7.0.0 and late ## Dynamic mapping [mapping-dynamic] -When you use [dynamic mapping](/manage-data/data-store/mapping/dynamic-mapping.md), {{es}} automatically detects the data types of fields in your documents and creates mappings for you. If you index additional documents with new fields, {{es}} will add these fields automatically. You can add fields to the top-level mapping, and to inner [`object`](https://www.elastic.co/guide/en/elasticsearch/reference/current/object.html) and [`nested`](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) fields. Dynamic mapping helps you get started quickly, but might yield suboptimal results for your specific use case due to automatic field type inference. +When you use [dynamic mapping](/manage-data/data-store/mapping/dynamic-mapping.md), {{es}} automatically detects the data types of fields in your documents and creates mappings for you. If you index additional documents with new fields, {{es}} will add these fields automatically. You can add fields to the top-level mapping, and to inner [`object`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/object.md) and [`nested`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/nested.md) fields. Dynamic mapping helps you get started quickly, but might yield suboptimal results for your specific use case due to automatic field type inference. Use [dynamic templates](/manage-data/data-store/mapping/dynamic-templates.md) to define custom mappings that are applied to dynamically added fields based on the matching condition. @@ -55,7 +55,7 @@ Defining your own mappings enables you to: * Define which string fields should be treated as full-text fields. * Define which fields contain numbers, dates, or geolocations. * Use data types that cannot be automatically detected (such as `geo_point` and `geo_shape`.) -* Choose date value [formats](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html), including custom date formats. +* Choose date value [formats](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-date-format.md), including custom date formats. * Create custom rules to control the mapping for [dynamically added fields](/manage-data/data-store/mapping/dynamic-mapping.md). * Optimize fields for partial matching. * Perform language-specific text analysis. @@ -84,11 +84,11 @@ In most cases, you can’t change mappings for fields that are already mapped. T However, you can update mappings under certain conditions: * You can add new fields to an existing mapping at any time, dynamically or explicitly. -* You can add new [multi-fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html) for existing fields. +* You can add new [multi-fields](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md) for existing fields. * Documents indexed before the mapping update will not have values for the new multi-fields until they are updated or reindexed. Documents indexed after the mapping change will automatically have values for the new multi-fields. 
-* Some [mapping parameters](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html) can be updated for existing fields of certain [data types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html). +* Some [mapping parameters](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-parameters.md) can be updated for existing fields of certain [data types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md). ## Prevent mapping explosions [mapping-limit-settings] @@ -97,4 +97,4 @@ Defining too many fields in an index can lead to a mapping explosion, which can Consider a situation where every new document inserted introduces new fields, such as with [dynamic mapping](/manage-data/data-store/mapping/dynamic-mapping.md). Each new field is added to the index mapping, which can become a problem as the mapping grows. -Use the [mapping limit settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-settings-limit.html) to limit the number of field mappings (created manually or dynamically) and prevent documents from causing a mapping explosion. \ No newline at end of file +Use the [mapping limit settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/mapping-limit-settings.md) to limit the number of field mappings (created manually or dynamically) and prevent documents from causing a mapping explosion. \ No newline at end of file diff --git a/manage-data/data-store/mapping/define-runtime-fields-in-search-request.md b/manage-data/data-store/mapping/define-runtime-fields-in-search-request.md index b4b4f42769..1ae307555c 100644 --- a/manage-data/data-store/mapping/define-runtime-fields-in-search-request.md +++ b/manage-data/data-store/mapping/define-runtime-fields-in-search-request.md @@ -71,7 +71,7 @@ PUT my-index-000001/_mapping Runtime fields take precedence over fields defined with the same name in the index mappings. This flexibility allows you to shadow existing fields and calculate a different value, without modifying the field itself. If you made a mistake in your index mapping, you can use runtime fields to calculate values that [override values](override-field-values-at-query-time.md) in the mapping during the search request. -Now, you can easily run an [average aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html) on the `measures.start` and `measures.end` fields: +Now, you can easily run an [average aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-avg-aggregation.md) on the `measures.start` and `measures.end` fields: ```console GET my-index-000001/_search @@ -106,7 +106,7 @@ The response includes the aggregation results without changing the values for th } ``` -Further, you can define a runtime field as part of a search query that calculates a value, and then run a [stats aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html) on that field *in the same query*. +Further, you can define a runtime field as part of a search query that calculates a value, and then run a [stats aggregation](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-stats-aggregation.md) on that field *in the same query*.
The `duration` runtime field doesn’t exist in the index mapping, but we can still search and aggregate on that field. The following query returns the calculated value for the `duration` field and runs a stats aggregation to compute statistics over numeric values extracted from the aggregated documents. diff --git a/manage-data/data-store/mapping/dynamic-field-mapping.md b/manage-data/data-store/mapping/dynamic-field-mapping.md index 0d8c88b89e..6045e702c0 100644 --- a/manage-data/data-store/mapping/dynamic-field-mapping.md +++ b/manage-data/data-store/mapping/dynamic-field-mapping.md @@ -5,12 +5,12 @@ mapped_pages: # Dynamic field mapping [dynamic-field-mapping] -When {{es}} detects a new field in a document, it *dynamically* adds the field to the type mapping by default. The [`dynamic`](https://www.elastic.co/guide/en/elasticsearch/reference/current/dynamic.html) parameter controls this behavior. +When {{es}} detects a new field in a document, it *dynamically* adds the field to the type mapping by default. The [`dynamic`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dynamic.md) parameter controls this behavior. You can explicitly instruct {{es}} to dynamically create fields based on incoming documents by setting the `dynamic` parameter to `true` or `runtime`. When dynamic field mapping is enabled, {{es}} uses the rules in the following table to determine how to map data types for each field. ::::{note} -The field data types in the following table are the only [field data types](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html) that {{es}} detects dynamically. You must explicitly map all other data types. +The field data types in the following table are the only [field data types](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-data-types.md) that {{es}} detects dynamically. You must explicitly map all other data types. :::: @@ -30,7 +30,7 @@ $$$dynamic-field-mapping-types$$$ | `string` that passes [numeric detection](#numeric-detection) | `float` or `long` | `double` or `long` | | `string` that doesn’t pass `date` detection or `numeric` detection | `text` with a `.keyword` sub-field | `keyword` | -You can disable dynamic mapping, both at the document and at the [`object`](https://www.elastic.co/guide/en/elasticsearch/reference/current/object.html) level. Setting the `dynamic` parameter to `false` ignores new fields, and `strict` rejects the document if {{es}} encounters an unknown field. +You can disable dynamic mapping, both at the document and at the [`object`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/object.md) level. Setting the `dynamic` parameter to `false` ignores new fields, and `strict` rejects the document if {{es}} encounters an unknown field. ::::{tip} Use the [update mapping API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) to update the `dynamic` setting on existing fields. @@ -41,11 +41,11 @@ You can customize dynamic field mapping rules for [date detection](#date-detecti ## Date detection [date-detection] -If `date_detection` is enabled (default), then new string fields are checked to see whether their contents match any of the date patterns specified in `dynamic_date_formats`. If a match is found, a new [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) field is added with the corresponding format. 
+If `date_detection` is enabled (default), then new string fields are checked to see whether their contents match any of the date patterns specified in `dynamic_date_formats`. If a match is found, a new [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) field is added with the corresponding format. The default value for `dynamic_date_formats` is: -[ [`"strict_date_optional_time"`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#strict-date-time),`"yyyy/MM/dd HH:mm:ss Z||yyyy/MM/dd Z"`] +[ [`"strict_date_optional_time"`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-date-format.md#strict-date-time),`"yyyy/MM/dd HH:mm:ss Z||yyyy/MM/dd Z"`] For example: @@ -79,7 +79,7 @@ PUT my-index-000001/_doc/1 <1> } ``` -1. The `create_date` field has been added as a [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) field. +1. The `create_date` field has been added as a [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) field. @@ -205,7 +205,7 @@ PUT my-index-000001/_doc/1 } ``` -1. The `my_float` field is added as a [`float`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) field. +1. The `my_float` field is added as a [`float`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) field. 2. The `my_integer` field is added as a [`long`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) field. diff --git a/manage-data/data-store/mapping/dynamic-templates.md b/manage-data/data-store/mapping/dynamic-templates.md index 928cbba101..041310ca0a 100644 --- a/manage-data/data-store/mapping/dynamic-templates.md +++ b/manage-data/data-store/mapping/dynamic-templates.md @@ -171,7 +171,7 @@ PUT my-index-000001/_doc/1 ``` 1. The `my_integer` field is mapped as an `integer`. -2. The `my_string` field is mapped as a `text`, with a `keyword` [multi-field](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html). +2. The `my_string` field is mapped as a `text`, with a `keyword` [multi-field](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/multi-fields.md). 3. The `my_boolean` field is mapped as a `keyword`. 4. The `field.count` field is mapped as a `long`. @@ -356,7 +356,7 @@ PUT my-index-000001/_doc/2 ## Template variables [template-variables] -The `{{name}}` and `{{dynamic_type}}` placeholders are replaced in the `mapping` with the field name and detected dynamic type. The following example sets all string fields to use an [`analyzer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analyzer.html) with the same name as the field, and disables [`doc_values`](https://www.elastic.co/guide/en/elasticsearch/reference/current/doc-values.html) for all non-string fields: +The `{{name}}` and `{{dynamic_type}}` placeholders are replaced in the `mapping` with the field name and detected dynamic type. 
The following example sets all string fields to use an [`analyzer`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/analyzer.md) with the same name as the field, and disables [`doc_values`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/doc-values.md) for all non-string fields: ```console PUT my-index-000001 diff --git a/manage-data/data-store/mapping/explicit-mapping.md b/manage-data/data-store/mapping/explicit-mapping.md index 2b915ff2f0..7654e2131d 100644 --- a/manage-data/data-store/mapping/explicit-mapping.md +++ b/manage-data/data-store/mapping/explicit-mapping.md @@ -27,9 +27,9 @@ PUT /my-index-000001 } ``` -1. Creates `age`, an [`integer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html) field -2. Creates `email`, a [`keyword`](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html) field -3. Creates `name`, a [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) field +1. Creates `age`, an [`integer`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/number.md) field +2. Creates `email`, a [`keyword`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md) field +3. Creates `name`, a [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) field @@ -37,7 +37,7 @@ PUT /my-index-000001 You can use the [update mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) API to add one or more new fields to an existing index. -The following example adds `employee-id`, a `keyword` field with an [`index`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index.html) mapping parameter value of `false`. This means values for the `employee-id` field are stored but not indexed or available for search. +The following example adds `employee-id`, a `keyword` field with an [`index`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-index.md) mapping parameter value of `false`. This means values for the `employee-id` field are stored but not indexed or available for search. ```console PUT /my-index-000001/_mapping @@ -54,13 +54,13 @@ PUT /my-index-000001/_mapping ## Update the mapping of a field [update-mapping] -Except for supported [mapping parameters](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html), you can’t change the mapping or field type of an existing field. Changing an existing field could invalidate data that’s already indexed. +Except for supported [mapping parameters](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-parameters.md), you can’t change the mapping or field type of an existing field. Changing an existing field could invalidate data that’s already indexed. If you need to change the mapping of a field in a data stream’s backing indices, see [Change mappings and settings for a data stream](../data-streams/modify-data-stream.md#data-streams-change-mappings-and-settings). If you need to change the mapping of a field in other indices, create a new index with the correct mapping and [reindex](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) your data into that index. -Renaming a field would invalidate data already indexed under the old field name. 
Instead, add an [`alias`](https://www.elastic.co/guide/en/elasticsearch/reference/current/field-alias.html) field to create an alternate field name. +Renaming a field would invalidate data already indexed under the old field name. Instead, add an [`alias`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-alias.md) field to create an alternate field name. ## View the mapping of an index [view-mapping] diff --git a/manage-data/data-store/mapping/explore-data-with-runtime-fields.md b/manage-data/data-store/mapping/explore-data-with-runtime-fields.md index 7ab9c82a83..e1f8184618 100644 --- a/manage-data/data-store/mapping/explore-data-with-runtime-fields.md +++ b/manage-data/data-store/mapping/explore-data-with-runtime-fields.md @@ -235,7 +235,7 @@ If the script didn’t include this condition, the query would fail on any shard ### Search for documents in a specific range [runtime-examples-grok-range] -You can also run a [range query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html) that operates on the `timestamp` field. The following query returns any documents where the `timestamp` is greater than or equal to `2020-04-30T14:31:27-05:00`: +You can also run a [range query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-range-query.md) that operates on the `timestamp` field. The following query returns any documents where the `timestamp` is greater than or equal to `2020-04-30T14:31:27-05:00`: ```console GET my-index-000001/_search @@ -289,7 +289,7 @@ The response includes the document where the log format doesn’t match, but the ## Define a runtime field with a dissect pattern [runtime-examples-dissect] -If you don’t need the power of regular expressions, you can use [dissect patterns](https://www.elastic.co/guide/en/elasticsearch/reference/current/dissect-processor.html) instead of grok patterns. Dissect patterns match on fixed delimiters but are typically faster than grok. +If you don’t need the power of regular expressions, you can use [dissect patterns](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/dissect-processor.md) instead of grok patterns. Dissect patterns match on fixed delimiters but are typically faster than grok. You can use dissect to achieve the same results as parsing the Apache logs with a [grok pattern](#runtime-examples-grok). Instead of matching on a log pattern, you include the parts of the string that you want to discard. Paying special attention to the parts of the string you want to discard will help build successful dissect patterns. 
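To make the dissect approach concrete, here is a minimal sketch (the index name, the `message` field, and the Apache-style log format are assumptions for illustration, and `message` is assumed to be mapped as a `keyword` or `wildcard` field) that maps a runtime field extracting the client IP from each log line with a dissect pattern and the `emit` method:

```console
PUT my-index-000001/_mapping
{
  "runtime": {
    "http.clientip": {
      "type": "ip",
      "script": """
        String clientip = dissect('%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{status} %{size}').extract(doc["message"].value)?.clientip;
        if (clientip != null) emit(clientip);
      """
    }
  }
}
```

Because the script only calls `emit` when the pattern extracts a value, documents whose `message` doesn’t match the expected format simply return no value for `http.clientip` instead of failing the request.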
diff --git a/manage-data/data-store/mapping/index-runtime-field.md b/manage-data/data-store/mapping/index-runtime-field.md index f8b41b0fb9..c240ab91bf 100644 --- a/manage-data/data-store/mapping/index-runtime-field.md +++ b/manage-data/data-store/mapping/index-runtime-field.md @@ -82,7 +82,7 @@ PUT my-index-000001/_mapping } ``` -You retrieve the calculated values using the [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html) parameter on the `_search` API: +You retrieve the calculated values using the [`fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md) parameter on the `_search` API: ```console GET my-index-000001/_search diff --git a/manage-data/data-store/mapping/map-runtime-field.md b/manage-data/data-store/mapping/map-runtime-field.md index f78b630093..fa5a56902f 100644 --- a/manage-data/data-store/mapping/map-runtime-field.md +++ b/manage-data/data-store/mapping/map-runtime-field.md @@ -8,7 +8,7 @@ mapped_pages: You map runtime fields by adding a `runtime` section under the mapping definition and defining [a Painless script](../../../explore-analyze/scripting/modules-scripting-using.md). This script has access to the entire context of a document, including the original `_source` via `params._source` and any mapped fields plus their values. At query time, the script runs and generates values for each scripted field that is required for the query. ::::{admonition} Emitting runtime field values -When defining a Painless script to use with runtime fields, you must include the [`emit` method](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-runtime-fields-context.html) to emit calculated values. +When defining a Painless script to use with runtime fields, you must include the [`emit` method](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-runtime-fields-context.md) to emit calculated values. :::: @@ -46,7 +46,7 @@ The `runtime` section can be any of these data types: * `long` * [`lookup`](retrieve-runtime-field.md#lookup-runtime-fields) -Runtime fields with a `type` of `date` can accept the [`format`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html) parameter exactly as the `date` field type. +Runtime fields with a `type` of `date` can accept the [`format`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-date-format.md) parameter exactly as the `date` field type. Runtime fields with a `type` of `lookup` allow retrieving fields from related indices. See [`retrieve fields from related indices`](retrieve-runtime-field.md#lookup-runtime-fields). @@ -85,7 +85,7 @@ PUT my-index-000001/ When no script is provided, {{es}} implicitly looks in `_source` at query time for a field with the same name as the runtime field, and returns a value if one exists. If a field with the same name doesn’t exist, the response doesn’t include any values for that runtime field. -In most cases, retrieve field values through [`doc_values`](https://www.elastic.co/guide/en/elasticsearch/reference/current/doc-values.html) whenever possible. Accessing `doc_values` with a runtime field is faster than retrieving values from `_source` because of how data is loaded from Lucene. +In most cases, retrieve field values through [`doc_values`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/doc-values.md) whenever possible. 
Accessing `doc_values` with a runtime field is faster than retrieving values from `_source` because of how data is loaded from Lucene. However, there are cases where retrieving fields from `_source` is necessary. For example, `text` fields do not have `doc_values` available by default, so you have to retrieve values from `_source`. In other instances, you might choose to disable `doc_values` on a specific field. diff --git a/manage-data/data-store/mapping/override-field-values-at-query-time.md b/manage-data/data-store/mapping/override-field-values-at-query-time.md index 982c0eb15b..46e0d31611 100644 --- a/manage-data/data-store/mapping/override-field-values-at-query-time.md +++ b/manage-data/data-store/mapping/override-field-values-at-query-time.md @@ -83,7 +83,7 @@ The response includes indexed values for documents matching model number `HG537P The following request defines a runtime field where the script evaluates the `model_number` field where the value is `HG537PU`. For each match, the script multiplies the value for the `voltage` field by `1.7`. -Using the [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html) parameter on the `_search` API, you can retrieve the value that the script calculates for the `measures.voltage` field for documents matching the search request: +Using the [`fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md) parameter on the `_search` API, you can retrieve the value that the script calculates for the `measures.voltage` field for documents matching the search request: ```console POST my-index-000001/_search diff --git a/manage-data/data-store/mapping/retrieve-runtime-field.md b/manage-data/data-store/mapping/retrieve-runtime-field.md index 2bd383fbdd..77b749f6b7 100644 --- a/manage-data/data-store/mapping/retrieve-runtime-field.md +++ b/manage-data/data-store/mapping/retrieve-runtime-field.md @@ -5,7 +5,7 @@ mapped_pages: # Retrieve a runtime field [runtime-retrieving-fields] -Use the [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html) parameter on the `_search` API to retrieve the values of runtime fields. Runtime fields won’t display in `_source`, but the `fields` API works for all fields, even those that were not sent as part of the original `_source`. +Use the [`fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md) parameter on the `_search` API to retrieve the values of runtime fields. Runtime fields won’t display in `_source`, but the `fields` API works for all fields, even those that were not sent as part of the original `_source`. ## Define a runtime field to calculate the day of week [runtime-define-field-dayofweek] @@ -199,7 +199,7 @@ POST logs/_search } ``` -1. Define a runtime field in the main search request with a type of `lookup` that retrieves fields from the target index using the [`term`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html) queries. +1. Define a runtime field in the main search request with a type of `lookup` that retrieves fields from the target index using the [`term`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-term-query.md) queries. 2. The target index where the lookup query executes against 3. A field on the main index whose values are used as the input values of the lookup term query 4. 
A field on the lookup index which the lookup query searches against diff --git a/manage-data/data-store/mapping/runtime-fields.md b/manage-data/data-store/mapping/runtime-fields.md index 613fb7c9aa..9db68c1977 100644 --- a/manage-data/data-store/mapping/runtime-fields.md +++ b/manage-data/data-store/mapping/runtime-fields.md @@ -14,7 +14,7 @@ A *runtime field* is a field that is evaluated at query time. Runtime fields ena You access runtime fields from the search API like any other field, and {{es}} sees runtime fields no differently. You can define runtime fields in the [index mapping](map-runtime-field.md) or in the [search request](define-runtime-fields-in-search-request.md). Your choice, which is part of the inherent flexibility of runtime fields. -Use the [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html) parameter on the `_search` API to [retrieve the values of runtime fields](retrieve-runtime-field.md). Runtime fields won’t display in `_source`, but the `fields` API works for all fields, even those that were not sent as part of the original `_source`. +Use the [`fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md) parameter on the `_search` API to [retrieve the values of runtime fields](retrieve-runtime-field.md). Runtime fields won’t display in `_source`, but the `fields` API works for all fields, even those that were not sent as part of the original `_source`. Runtime fields are useful when working with log data (see [examples](explore-data-with-runtime-fields.md)), especially when you’re unsure about the data structure. Your search speed decreases, but your index size is much smaller and you can more quickly process logs without having to index them. @@ -34,9 +34,9 @@ Runtime fields can replace many of the ways you can use scripting with the `_sea You can use [script fields](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html#script-fields) to access values in `_source` and return calculated values based on a script valuation. Runtime fields have the same capabilities, but provide greater flexibility because you can query and aggregate on runtime fields in a search request. Script fields can only fetch values. -Similarly, you could write a [script query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html) that filters documents in a search request based on a script. Runtime fields provide a very similar feature that is more flexible. You write a script to create field values and they are available everywhere, such as [`fields`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-fields.html), [all queries](../../../explore-analyze/query-filter/languages/querydsl.md), and [aggregations](../../../explore-analyze/query-filter/aggregations.md). +Similarly, you could write a [script query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-script-query.md) that filters documents in a search request based on a script. Runtime fields provide a very similar feature that is more flexible. You write a script to create field values and they are available everywhere, such as [`fields`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/retrieve-selected-fields.md), [all queries](../../../explore-analyze/query-filter/languages/querydsl.md), and [aggregations](../../../explore-analyze/query-filter/aggregations.md).
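As a minimal sketch (reusing the hypothetical `measures.start` and `measures.end` numeric fields from the earlier aggregation example), the following request defines a `duration` runtime field directly in the search body, filters on it with a regular `range` query, and retrieves it with the `fields` parameter, which a script query alone can’t do:

```console
GET my-index-000001/_search
{
  "runtime_mappings": {
    "duration": {
      "type": "long",
      "script": """
        emit(doc['measures.end'].value - doc['measures.start'].value);
      """
    }
  },
  "query": {
    "range": {
      "duration": { "gte": 100 }
    }
  },
  "fields": ["duration"]
}
```

The same `duration` field could also feed an aggregation in the same request.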
-You can also use scripts to [sort search results](https://www.elastic.co/guide/en/elasticsearch/reference/current/sort-search-results.html#script-based-sorting), but that same script works exactly the same in a runtime field. +You can also use scripts to [sort search results](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/sort-search-results.md#script-based-sorting), but that same script works exactly the same in a runtime field. If you move a script from any of these sections in a search request to a runtime field that is computing values from the same number of documents, the performance should be about the same. The performance for these features is largely dependent upon the calculations that the included script is running and how many documents the script runs against. diff --git a/manage-data/data-store/templates.md b/manage-data/data-store/templates.md index 01187b6b6a..4f4fc2edb6 100644 --- a/manage-data/data-store/templates.md +++ b/manage-data/data-store/templates.md @@ -34,11 +34,11 @@ The following conditions apply to index templates: * `profiling-*` * `security_solution-*-*` -[{{agent}}](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) uses these templates to create data streams. Index templates created by {{fleet}} integrations use similar overlapping index patterns and have a priority up to `200`. +[{{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) uses these templates to create data streams. Index templates created by {{fleet}} integrations use similar overlapping index patterns and have a priority up to `200`. If you use {{fleet}} or {{agent}}, assign your index templates a priority lower than `100` to avoid overriding these templates. Otherwise, to avoid accidentally applying the templates, do one or more of the following: -* To disable all built-in index and component templates, set [`stack.templates.enabled`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-management-settings.html#stack-templates-enabled) to `false` using the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). Note, however, that this is not recommended, see the [setting documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-management-settings.html#stack-templates-enabled) for more information. +* To disable all built-in index and component templates, set [`stack.templates.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-management-settings.md#stack-templates-enabled) to `false` using the [cluster update settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings); a sketch of this request follows this list. Note, however, that this is not recommended; see the [setting documentation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-management-settings.md#stack-templates-enabled) for more information. * Use a non-overlapping index pattern. * Assign templates with an overlapping pattern a `priority` higher than `500`. For example, if you don’t use {{fleet}} or {{agent}} and want to create a template for the `logs-*` index pattern, assign your template a priority of `500`. This ensures your template is applied instead of the built-in template for `logs-*-*`. * To avoid naming collisions with built-in and Fleet-managed index templates, avoid using `@` as part of the name of your own index templates.
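For reference, disabling the built-in templates is a single [cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) request, sketched here (again, this is not recommended):

```console
PUT _cluster/settings
{
  "persistent": {
    "stack.templates.enabled": false
  }
}
```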
diff --git a/manage-data/data-store/templates/index-template-management.md b/manage-data/data-store/templates/index-template-management.md index 413a3df5f2..91eaa04342 100644 --- a/manage-data/data-store/templates/index-template-management.md +++ b/manage-data/data-store/templates/index-template-management.md @@ -52,7 +52,7 @@ In this tutorial, you’ll create an index template and use it to configure two ::: 2. Define index settings. These are optional. For this tutorial, leave this section blank. -3. Define a mapping that contains an [object](https://www.elastic.co/guide/en/elasticsearch/reference/current/object.html) field named `geo` with a child [`geo_point`](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) field named `coordinates`: +3. Define a mapping that contains an [object](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/object.md) field named `geo` with a child [`geo_point`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) field named `coordinates`: :::{image} ../../../images/elasticsearch-reference-management-index-templates-mappings.png :alt: Mapped fields page diff --git a/manage-data/data-store/text-analysis.md b/manage-data/data-store/text-analysis.md index 3f3355dc33..e6c43b7594 100644 --- a/manage-data/data-store/text-analysis.md +++ b/manage-data/data-store/text-analysis.md @@ -11,7 +11,7 @@ _Text analysis_ is the process of converting unstructured text, like the body of Text analysis enables {{es}} to perform full-text search, where the search returns all *relevant* results rather than just exact matches. For example, if you search for `Quick fox jumps`, you probably want the document that contains `A quick brown fox jumps over the lazy dog`, and you might also want documents that contain related words like `fast fox` or `foxes leap`. -{{es}} performs text analysis when indexing or searching [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) fields. If your index does _not_ contain `text` fields, no further setup is needed; you can skip the pages in this section. If you _do_ use `text` fields or your text searches aren’t returning results as expected, configuring text analysis can often help. You should also look into analysis configuration if you’re using {{es}} to: +{{es}} performs text analysis when indexing or searching [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) fields. If your index does _not_ contain `text` fields, no further setup is needed; you can skip the pages in this section. If you _do_ use `text` fields or your text searches aren’t returning results as expected, configuring text analysis can often help. You should also look into analysis configuration if you’re using {{es}} to: * Build a search engine * Mine unstructured data @@ -44,9 +44,9 @@ To ensure search terms match these words as intended, you can apply the same tok Text analysis is performed by an [*analyzer*](/manage-data/data-store/text-analysis/anatomy-of-an-analyzer.md), a set of rules that govern the entire process. -{{es}} includes a default analyzer, called the [standard analyzer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-standard-analyzer.html), which works well for most use cases right out of the box. 
+{{es}} includes a default analyzer, called the [standard analyzer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-standard-analyzer.md), which works well for most use cases right out of the box. -If you want to tailor your search experience, you can choose a different [built-in analyzer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html) or even [configure a custom one](/manage-data/data-store/text-analysis/create-custom-analyzer.md). A custom analyzer gives you control over each step of the analysis process, including: +If you want to tailor your search experience, you can choose a different [built-in analyzer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analyzer-reference.md) or even [configure a custom one](/manage-data/data-store/text-analysis/create-custom-analyzer.md). A custom analyzer gives you control over each step of the analysis process, including: * Changes to the text *before* tokenization * How text is converted to tokens diff --git a/manage-data/data-store/text-analysis/anatomy-of-an-analyzer.md b/manage-data/data-store/text-analysis/anatomy-of-an-analyzer.md index 3a7ce86a99..914094750b 100644 --- a/manage-data/data-store/text-analysis/anatomy-of-an-analyzer.md +++ b/manage-data/data-store/text-analysis/anatomy-of-an-analyzer.md @@ -7,30 +7,30 @@ mapped_pages: An *analyzer*  — whether built-in or custom — is just a package which contains three lower-level building blocks: *character filters*, *tokenizers*, and *token filters*. -The built-in [analyzers](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html) pre-package these building blocks into analyzers suitable for different languages and types of text. Elasticsearch also exposes the individual building blocks so that they can be combined to define new [`custom`](create-custom-analyzer.md) analyzers. +The built-in [analyzers](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analyzer-reference.md) pre-package these building blocks into analyzers suitable for different languages and types of text. Elasticsearch also exposes the individual building blocks so that they can be combined to define new [`custom`](create-custom-analyzer.md) analyzers. ## Character filters [analyzer-anatomy-character-filters] A *character filter* receives the original text as a stream of characters and can transform the stream by adding, removing, or changing characters. For instance, a character filter could be used to convert Hindu-Arabic numerals (٠‎١٢٣٤٥٦٧٨‎٩‎) into their Arabic-Latin equivalents (0123456789), or to strip HTML elements like `` from the stream. -An analyzer may have **zero or more** [character filters](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-charfilters.html), which are applied in order. +An analyzer may have **zero or more** [character filters](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/character-filter-reference.md), which are applied in order. ## Tokenizer [analyzer-anatomy-tokenizer] -A *tokenizer* receives a stream of characters, breaks it up into individual *tokens* (usually individual words), and outputs a stream of *tokens*. For instance, a [`whitespace`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-whitespace-tokenizer.html) tokenizer breaks text into tokens whenever it sees any whitespace. 
It would convert the text `"Quick brown fox!"` into the terms `[Quick, brown, fox!]`. +A *tokenizer* receives a stream of characters, breaks it up into individual *tokens* (usually individual words), and outputs a stream of *tokens*. For instance, a [`whitespace`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-whitespace-tokenizer.md) tokenizer breaks text into tokens whenever it sees any whitespace. It would convert the text `"Quick brown fox!"` into the terms `[Quick, brown, fox!]`. The tokenizer is also responsible for recording the order or *position* of each term and the start and end *character offsets* of the original word which the term represents. -An analyzer must have **exactly one** [tokenizer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenizers.html). +An analyzer must have **exactly one** [tokenizer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/tokenizer-reference.md). ## Token filters [analyzer-anatomy-token-filters] -A *token filter* receives the token stream and may add, remove, or change tokens. For example, a [`lowercase`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lowercase-tokenfilter.html) token filter converts all tokens to lowercase, a [`stop`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-stop-tokenfilter.html) token filter removes common words (*stop words*) like `the` from the token stream, and a [`synonym`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-tokenfilter.html) token filter introduces synonyms into the token stream. +A *token filter* receives the token stream and may add, remove, or change tokens. For example, a [`lowercase`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-lowercase-tokenfilter.md) token filter converts all tokens to lowercase, a [`stop`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-stop-tokenfilter.md) token filter removes common words (*stop words*) like `the` from the token stream, and a [`synonym`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-synonym-tokenfilter.md) token filter introduces synonyms into the token stream. Token filters are not allowed to change the position or character offsets of each token. -An analyzer may have **zero or more** [token filters](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenfilters.html), which are applied in order. +An analyzer may have **zero or more** [token filters](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/token-filter-reference.md), which are applied in order. diff --git a/manage-data/data-store/text-analysis/configure-text-analysis.md b/manage-data/data-store/text-analysis/configure-text-analysis.md index e649b5842f..2fbf4571e3 100644 --- a/manage-data/data-store/text-analysis/configure-text-analysis.md +++ b/manage-data/data-store/text-analysis/configure-text-analysis.md @@ -5,9 +5,9 @@ mapped_pages: # Configure text analysis [configure-text-analysis] -By default, {{es}} uses the [`standard` analyzer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-standard-analyzer.html) for all text analysis. The `standard` analyzer gives you out-of-the-box support for most natural languages and use cases. If you chose to use the `standard` analyzer as-is, no further configuration is needed. 
+By default, {{es}} uses the [`standard` analyzer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-standard-analyzer.md) for all text analysis. The `standard` analyzer gives you out-of-the-box support for most natural languages and use cases. If you choose to use the `standard` analyzer as-is, no further configuration is needed. -If the standard analyzer does not fit your needs, review and test {{es}}'s other built-in [built-in analyzers](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html). Built-in analyzers don’t require configuration, but some support options that can be used to adjust their behavior. For example, you can configure the `standard` analyzer with a list of custom stop words to remove. +If the standard analyzer does not fit your needs, review and test {{es}}'s other [built-in analyzers](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analyzer-reference.md). Built-in analyzers don’t require configuration, but some support options that can be used to adjust their behavior. For example, you can configure the `standard` analyzer with a list of custom stop words to remove. If no built-in analyzer fits your needs, you can test and create a custom analyzer. Custom analyzers involve selecting and combining different [analyzer components](anatomy-of-an-analyzer.md), giving you greater control over the process. diff --git a/manage-data/data-store/text-analysis/configuring-built-in-analyzers.md b/manage-data/data-store/text-analysis/configuring-built-in-analyzers.md index 78e34e9403..94bd4563e5 100644 --- a/manage-data/data-store/text-analysis/configuring-built-in-analyzers.md +++ b/manage-data/data-store/text-analysis/configuring-built-in-analyzers.md @@ -5,7 +5,7 @@ # Configuring built-in analyzers [configuring-analyzers] -The built-in analyzers can be used directly without any configuration. Some of them, however, support configuration options to alter their behaviour. For instance, the [`standard` analyzer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-standard-analyzer.html) can be configured to support a list of stop words: +The built-in analyzers can be used directly without any configuration. Some of them, however, support configuration options to alter their behavior. For instance, the [`standard` analyzer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-standard-analyzer.md) can be configured to support a list of stop words: ```console PUT my-index-000001 diff --git a/manage-data/data-store/text-analysis/create-custom-analyzer.md b/manage-data/data-store/text-analysis/create-custom-analyzer.md index d2e72e0470..0c4141ca59 100644 --- a/manage-data/data-store/text-analysis/create-custom-analyzer.md +++ b/manage-data/data-store/text-analysis/create-custom-analyzer.md @@ -7,9 +7,9 @@ When the built-in analyzers do not fulfill your needs, you can create a `custom` analyzer which uses the appropriate combination of: -* zero or more [character filters](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-charfilters.html) -* a [tokenizer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenizers.html) -* zero or more [token filters](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenfilters.html).
+* zero or more [character filters](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/character-filter-reference.md) +* a [tokenizer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/tokenizer-reference.md) +* zero or more [token filters](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/token-filter-reference.md). ## Configuration [_configuration] @@ -17,7 +17,7 @@ When the built-in analyzers do not fulfill your needs, you can create a `custom` The `custom` analyzer accepts the following parameters: `type` -: Analyzer type. Accepts [built-in analyzer types](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html). For custom analyzers, use `custom` or omit this parameter. +: Analyzer type. Accepts [built-in analyzer types](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analyzer-reference.md). For custom analyzers, use `custom` or omit this parameter. `tokenizer` : A built-in or customised [tokenizer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenizers.html). (Required) @@ -29,7 +29,7 @@ The `custom` analyzer accepts the following parameters: : An optional array of built-in or customised [token filters](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenfilters.html). `position_increment_gap` -: When indexing an array of text values, Elasticsearch inserts a fake "gap" between the last term of one value and the first term of the next value to ensure that a phrase query doesn’t match two terms from different array elements. Defaults to `100`. See [`position_increment_gap`](https://www.elastic.co/guide/en/elasticsearch/reference/current/position-increment-gap.html) for more. +: When indexing an array of text values, Elasticsearch inserts a fake "gap" between the last term of one value and the first term of the next value to ensure that a phrase query doesn’t match two terms from different array elements. Defaults to `100`. See [`position_increment_gap`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/position-increment-gap.md) for more. 
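As a minimal sketch of how these parameters fit together (the index, analyzer, and field names are placeholders), the following defines a bare `custom` analyzer with only the required `tokenizer` plus an explicit `position_increment_gap`, and applies it to a `text` field:

```console
PUT my-index-000001
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_custom_analyzer": {
          "type": "custom",
          "tokenizer": "standard",
          "position_increment_gap": 100
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "names": {
        "type": "text",
        "analyzer": "my_custom_analyzer"
      }
    }
  }
}
```

With the default gap of `100`, a `match_phrase` query for `Abraham Lincoln` won’t match a document indexed with `"names": ["John Abraham", "Lincoln Smith"]`, because `Abraham` and `Lincoln` end up about 100 positions apart.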
## Example configuration [_example_configuration] @@ -37,16 +37,16 @@ The `custom` analyzer accepts the following parameters: Here is an example that combines the following: Character Filter -: * [HTML Strip Character Filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-htmlstrip-charfilter.html) +: * [HTML Strip Character Filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-htmlstrip-charfilter.md) Tokenizer -: * [Standard Tokenizer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-standard-tokenizer.html) +: * [Standard Tokenizer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-standard-tokenizer.md) Token Filters -: * [Lowercase Token Filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lowercase-tokenfilter.html) -* [ASCII-Folding Token Filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-asciifolding-tokenfilter.html) +: * [Lowercase Token Filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-lowercase-tokenfilter.md) +* [ASCII-Folding Token Filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-asciifolding-tokenfilter.md) ```console @@ -92,16 +92,16 @@ The previous example used tokenizer, token filters, and character filters with t Here is a more complicated example that combines the following: Character Filter -: * [Mapping Character Filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-mapping-charfilter.html), configured to replace `:)` with `_happy_` and `:(` with `_sad_` +: * [Mapping Character Filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-mapping-charfilter.md), configured to replace `:)` with `_happy_` and `:(` with `_sad_` Tokenizer -: * [Pattern Tokenizer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-pattern-tokenizer.html), configured to split on punctuation characters +: * [Pattern Tokenizer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-pattern-tokenizer.md), configured to split on punctuation characters Token Filters : * [Lowercase Token Filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lowercase-tokenfilter.html) -* [Stop Token Filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-stop-tokenfilter.html), configured to use the pre-defined list of English stop words +* [Stop Token Filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-stop-tokenfilter.md), configured to use the pre-defined list of English stop words Here is an example: diff --git a/manage-data/data-store/text-analysis/index-search-analysis.md b/manage-data/data-store/text-analysis/index-search-analysis.md index ca750c0d90..ca5e600109 100644 --- a/manage-data/data-store/text-analysis/index-search-analysis.md +++ b/manage-data/data-store/text-analysis/index-search-analysis.md @@ -8,10 +8,10 @@ mapped_pages: Text analysis occurs at two times: Index time -: When a document is indexed, any [`text`](https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html) field values are analyzed. +: When a document is indexed, any [`text`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/text.md) field values are analyzed. 
Search time -: When running a [full-text search](https://www.elastic.co/guide/en/elasticsearch/reference/current/full-text-queries.html) on a `text` field, the query string (the text the user is searching for) is analyzed. Search time is also called *query time*. +: When running a [full-text search](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/full-text-queries.md) on a `text` field, the query string (the text the user is searching for) is analyzed. Search time is also called *query time*. For more details on text analysis at search time, refer to [Text analysis during search](/solutions/search/full-text/text-analysis-during-search.md). diff --git a/manage-data/data-store/text-analysis/specify-an-analyzer.md b/manage-data/data-store/text-analysis/specify-an-analyzer.md index 197e3b8545..2a8e9a89de 100644 --- a/manage-data/data-store/text-analysis/specify-an-analyzer.md +++ b/manage-data/data-store/text-analysis/specify-an-analyzer.md @@ -28,10 +28,10 @@ If you don’t typically create mappings for your indices, you can use [index te {{es}} determines which index analyzer to use by checking the following parameters in order: -1. The [`analyzer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analyzer.html) mapping parameter for the field. See [Specify the analyzer for a field](#specify-index-field-analyzer). +1. The [`analyzer`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/analyzer.md) mapping parameter for the field. See [Specify the analyzer for a field](#specify-index-field-analyzer). 2. The `analysis.analyzer.default` index setting. See [Specify the default analyzer for an index](#specify-index-time-default-analyzer). -If none of these parameters are specified, the [`standard` analyzer](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-standard-analyzer.html) is used. +If none of these parameters are specified, the [`standard` analyzer](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-standard-analyzer.md) is used. ## Specify the analyzer for a field [specify-index-field-analyzer] @@ -82,7 +82,7 @@ PUT my-index-000001 ::::{warning} In most cases, specifying a different search analyzer is unnecessary. Doing so could negatively impact relevancy and result in unexpected search results. -If you choose to specify a separate search analyzer, we recommend you thoroughly [test your analysis configuration](https://www.elastic.co/guide/en/elasticsearch/reference/current/test-analyzer.html) before deploying in production. +If you choose to specify a separate search analyzer, we recommend you thoroughly [test your analysis configuration](/manage-data/data-store/text-analysis/test-an-analyzer.md) before deploying in production. :::: @@ -90,7 +90,7 @@ If you choose to specify a separate search analyzer, we recommend you thoroughly At search time, {{es}} determines which analyzer to use by checking the following parameters in order: 1. The [`analyzer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analyzer.html) parameter in the search query. See [Specify the search analyzer for a query](#specify-search-query-analyzer). -2. The [`search_analyzer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-analyzer.html) mapping parameter for the field. See [Specify the search analyzer for a field](#specify-search-field-analyzer). +2. 
The [`search_analyzer`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/search-analyzer.md) mapping parameter for the field. See [Specify the search analyzer for a field](#specify-search-field-analyzer). 3. The `analysis.analyzer.default_search` index setting. See [Specify the default search analyzer for an index](#specify-search-default-analyzer). 4. The [`analyzer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analyzer.html) mapping parameter for the field. See [Specify the analyzer for a field](#specify-index-field-analyzer). @@ -99,9 +99,9 @@ If none of these parameters are specified, the [`standard` analyzer](https://www ## Specify the search analyzer for a query [specify-search-query-analyzer] -When writing a [full-text query](https://www.elastic.co/guide/en/elasticsearch/reference/current/full-text-queries.html), you can use the `analyzer` parameter to specify a search analyzer. If provided, this overrides any other search analyzers. +When writing a [full-text query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/full-text-queries.md), you can use the `analyzer` parameter to specify a search analyzer. If provided, this overrides any other search analyzers. -The following [search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) request sets the `stop` analyzer as the search analyzer for a [`match`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html) query. +The following [search API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) request sets the `stop` analyzer as the search analyzer for a [`match`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-match-query.md) query. ```console GET my-index-000001/_search diff --git a/manage-data/data-store/text-analysis/stemming.md b/manage-data/data-store/text-analysis/stemming.md index 3d233fa777..f1053b01a4 100644 --- a/manage-data/data-store/text-analysis/stemming.md +++ b/manage-data/data-store/text-analysis/stemming.md @@ -41,10 +41,10 @@ However, most algorithmic stemmers only alter the existing text of a word. This The following token filters use algorithmic stemming: -* [`stemmer`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-stemmer-tokenfilter.html), which provides algorithmic stemming for several languages, some with additional variants. -* [`kstem`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-kstem-tokenfilter.html), a stemmer for English that combines algorithmic stemming with a built-in dictionary. -* [`porter_stem`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-porterstem-tokenfilter.html), our recommended algorithmic stemmer for English. -* [`snowball`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-snowball-tokenfilter.html), which uses [Snowball](https://snowballstem.org/)-based stemming rules for several languages. +* [`stemmer`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-stemmer-tokenfilter.md), which provides algorithmic stemming for several languages, some with additional variants. +* [`kstem`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-kstem-tokenfilter.md), a stemmer for English that combines algorithmic stemming with a built-in dictionary. 
+* [`porter_stem`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-porterstem-tokenfilter.md), our recommended algorithmic stemmer for English. +* [`snowball`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-snowball-tokenfilter.md), which uses [Snowball](https://snowballstem.org/)-based stemming rules for several languages. ## Dictionary stemmers [dictionary-stemmers] @@ -65,7 +65,7 @@ In practice, algorithmic stemmers typically outperform dictionary stemmers. This * **Dictionary quality**
A dictionary stemmer is only as good as its dictionary. To work well, these dictionaries must include a significant number of words, be updated regularly, and change with language trends. Often, by the time a dictionary has been made available, it’s incomplete and some of its entries are already outdated. * **Size and performance**
Dictionary stemmers must load all words, prefixes, and suffixes from their dictionaries into memory. This can use a significant amount of RAM. Low-quality dictionaries may also be less efficient with prefix and suffix removal, which can slow the stemming process significantly. -You can use the [`hunspell`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-hunspell-tokenfilter.html) token filter to perform dictionary stemming. +You can use the [`hunspell`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-hunspell-tokenfilter.md) token filter to perform dictionary stemming. ::::{tip} If available, we recommend trying an algorithmic stemmer for your language before using the [`hunspell`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-hunspell-tokenfilter.html) token filter. @@ -80,10 +80,10 @@ Sometimes stemming can produce shared root words that are spelled similarly but To prevent this and better control stemming, you can use the following token filters: -* [`stemmer_override`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-stemmer-override-tokenfilter.html), which lets you define rules for stemming specific tokens. -* [`keyword_marker`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-keyword-marker-tokenfilter.html), which marks specified tokens as keywords. Keyword tokens are not stemmed by subsequent stemmer token filters. -* [`conditional`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-condition-tokenfilter.html), which can be used to mark tokens as keywords, similar to the `keyword_marker` filter. +* [`stemmer_override`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-stemmer-override-tokenfilter.md), which lets you define rules for stemming specific tokens. +* [`keyword_marker`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-keyword-marker-tokenfilter.md), which marks specified tokens as keywords. Keyword tokens are not stemmed by subsequent stemmer token filters. +* [`conditional`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-condition-tokenfilter.md), which can be used to mark tokens as keywords, similar to the `keyword_marker` filter. -For built-in [language analyzers](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html), you also can use the [`stem_exclusion`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html#_excluding_words_from_stemming) parameter to specify a list of words that won’t be stemmed. +For built-in [language analyzers](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-lang-analyzer.md), you can also use the [`stem_exclusion`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html#_excluding_words_from_stemming) parameter to specify a list of words that won’t be stemmed. diff --git a/manage-data/data-store/text-analysis/token-graphs.md b/manage-data/data-store/text-analysis/token-graphs.md index 657632bcbf..a57c24d93c 100644 --- a/manage-data/data-store/text-analysis/token-graphs.md +++ b/manage-data/data-store/text-analysis/token-graphs.md @@ -33,10 +33,10 @@ Some token filters can add tokens that span multiple positions.
These can includ However, only some token filters, known as *graph token filters*, accurately record the `positionLength` for multi-position tokens. These filters include: -* [`synonym_graph`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html) -* [`word_delimiter_graph`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-word-delimiter-graph-tokenfilter.html) +* [`synonym_graph`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-synonym-graph-tokenfilter.md) +* [`word_delimiter_graph`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-word-delimiter-graph-tokenfilter.md) -Some tokenizers, such as the [`nori_tokenizer`](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-nori-tokenizer.html), also accurately decompose compound tokens into multi-position tokens. +Some tokenizers, such as the [`nori_tokenizer`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/analysis-nori-tokenizer.md), also accurately decompose compound tokens into multi-position tokens. In the following graph, `domain name system` and its synonym, `dns`, both have a position of `0`. However, `dns` has a `positionLength` of `3`. Other tokens in the graph have a default `positionLength` of `1`. @@ -48,7 +48,7 @@ In the following graph, `domain name system` and its synonym, `dns`, both have a [Indexing](index-search-analysis.md) ignores the `positionLength` attribute and does not support token graphs containing multi-position tokens. -However, queries, such as the [`match`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html) or [`match_phrase`](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query-phrase.html) query, can use these graphs to generate multiple sub-queries from a single query string. +However, queries, such as the [`match`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-match-query.md) or [`match_phrase`](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-match-query-phrase.md) query, can use these graphs to generate multiple sub-queries from a single query string. :::::{dropdown} Example A user runs a search for the following phrase using the `match_phrase` query: @@ -78,8 +78,8 @@ This means the query matches documents containing either `dns is fragile` *or* ` The following token filters can add tokens that span multiple positions but only record a default `positionLength` of `1`: -* [`synonym`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-tokenfilter.html) -* [`word_delimiter`](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-word-delimiter-tokenfilter.html) +* [`synonym`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-synonym-tokenfilter.md) +* [`word_delimiter`](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-word-delimiter-tokenfilter.md) This means these filters will produce invalid token graphs for streams containing such tokens. diff --git a/manage-data/ingest.md b/manage-data/ingest.md index e48a513fd9..cdd918fb9f 100644 --- a/manage-data/ingest.md +++ b/manage-data/ingest.md @@ -27,7 +27,7 @@ Elastic offer tools designed to ingest specific types of general content. 
The co * To index **documents** directly into {{es}}, use the {{es}} [document APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-document). * To send **application data** directly to {{es}}, use an [{{es}} language client](https://www.elastic.co/guide/en/elasticsearch/client/index.html). * To index **web page content**, use the Elastic [web crawler](https://www.elastic.co/web-crawler). -* To sync **data from third-party sources**, use [connectors](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-connectors.html). A connector syncs content from an original data source to an {{es}} index. Using connectors you can create *searchable*, read-only replicas of your data sources. +* To sync **data from third-party sources**, use [connectors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/search-connectors/index.md). A connector syncs content from an original data source to an {{es}} index. Using connectors you can create *searchable*, read-only replicas of your data sources. * To index **single files** for testing in a non-production environment, use the {{kib}} [file uploader](ingest/tools/upload-data-files.md). If you would like to try things out before you add your own data, try using our [sample data](ingest/sample-data.md). diff --git a/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md b/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md index 2980812973..898f30b022 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md @@ -25,5 +25,5 @@ Use when Info for air-gapped environments: * [Installing the {{stack}} in an air-gapped environment](../../../deploy-manage/deploy/cloud-enterprise/air-gapped-install.md) -* [Using a proxy server with Elastic Agent and Fleet](https://www.elastic.co/guide/en/fleet/current/fleet-agent-proxy-support.html) +* [Using a proxy server with Elastic Agent and Fleet](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/fleet-agent-proxy-support.md) diff --git a/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md b/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md index 4e4dcbde1b..5376333f7b 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md @@ -32,8 +32,8 @@ Info on {{agent}} and agent integrations: Info on {{ls}} and {{ls}} plugins: * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{ls}} {{agent}} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) -* [{{ls}} Kafka output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html) +* [{{ls}} {{agent}} input](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-elastic_agent.md) +* [{{ls}} Kafka output](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-kafka.md) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md b/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md index d7b86c6f53..c6a4c0f7c2 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md @@ -32,10 +32,10 @@ Info on {{agent}} and agent integrations: Info on {{ls}} and {{ls}} 
Kafka plugins: * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{ls}} {{agent}} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) -* [{{ls}} Kafka input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html) -* [{{ls}} Kafka output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html) -* [{{ls}} Elasticsearch output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) +* [{{ls}} {{agent}} input](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-elastic_agent.md) +* [{{ls}} Kafka input](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-kafka.md) +* [{{ls}} Kafka output](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-kafka.md) +* [{{ls}} Elasticsearch output](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md b/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md index 595035bbc2..34a0d70bc7 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md @@ -25,10 +25,10 @@ Use when Info for air-gapped environments: * [Installing the {{stack}} in an air-gapped environment](../../../deploy-manage/deploy/cloud-enterprise/air-gapped-install.md) -* [Using a proxy server with Elastic Agent and Fleet](https://www.elastic.co/guide/en/fleet/current/fleet-agent-proxy-support.html) +* [Using a proxy server with Elastic Agent and Fleet](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/fleet-agent-proxy-support.md) ## Geoip database management in air-gapped environments [ls-geoip] -The [{{ls}} geoip filter](https://www.elastic.co/guide/en/logstash/current/plugins-filters-geoip.html) requires regular database updates to remain up-to-date with the latest information. If you are using the {{ls}} geoip filter plugin in an air-gapped environment, you can manage updates through a proxy, a custom endpoint, or manually. Check out [Manage your own database updates](https://www.elastic.co/guide/en/logstash/current/plugins-filters-geoip.html#plugins-filters-geoip-manage_update) for more info. +The [{{ls}} geoip filter](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-filters-geoip.md) requires regular database updates to remain up-to-date with the latest information. If you are using the {{ls}} geoip filter plugin in an air-gapped environment, you can manage updates through a proxy, a custom endpoint, or manually. Check out [Manage your own database updates](https://www.elastic.co/guide/en/logstash/current/plugins-filters-geoip.html#plugins-filters-geoip-manage_update) for more info. 
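To make the manual option concrete, here is a minimal, hypothetical pipeline fragment (the `source` field and the database path are illustrative assumptions, not part of the reference architecture): pointing the geoip filter at a locally managed database file makes the plugin use that file instead of fetching updates.

```
filter {
  geoip {
    # Field containing the IP address to enrich (illustrative).
    source => "[source][ip]"
    # Locally managed GeoLite2 database. When an explicit path is set,
    # the plugin uses this file and skips automatic database updates.
    database => "/etc/logstash/geoip/GeoLite2-City.mmdb"
  }
}
```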
diff --git a/manage-data/ingest/ingest-reference-architectures/agent-proxy.md b/manage-data/ingest/ingest-reference-architectures/agent-proxy.md index cfad026bac..8e713fe282 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-proxy.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-proxy.md @@ -39,7 +39,7 @@ Info on {{agent}} and agent integrations: Info on using a proxy server: -* [Using a proxy server with {{agent}} and {{fleet}}](https://www.elastic.co/guide/en/fleet/current/fleet-agent-proxy-support.html) +* [Using a proxy server with {{agent}} and {{fleet}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/fleet-agent-proxy-support.md) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/ls-enrich.md b/manage-data/ingest/ingest-reference-architectures/ls-enrich.md index 04cdf70f5f..6d2f9404f9 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-enrich.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-enrich.md @@ -31,14 +31,14 @@ Examples Info on configuring {{agent}}: * [Fleet and Elastic Agent Guide](https://www.elastic.co/guide/en/fleet/current) -* [Configuring outputs for {{agent}}](https://www.elastic.co/guide/en/fleet/current/elastic-agent-output-configuration.html) +* [Configuring outputs for {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/elastic-agent-output-configuration.md) For info on {{ls}} for enriching data, check out these sections in the [Logstash Reference](https://www.elastic.co/guide/en/logstash/current): -* [{{ls}} {{agent}} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) -* [{{ls}} plugins for enriching data](https://www.elastic.co/guide/en/logstash/current/lookup-enrichment.html) -* [Logstash filter plugins](https://www.elastic.co/guide/en/logstash/current/filter-plugins.html) -* [{{ls}} {{es}} output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) +* [{{ls}} {{agent}} input](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-elastic_agent.md) +* [{{ls}} plugins for enriching data](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/lookup-enrichment.md) +* [Logstash filter plugins](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/filter-plugins.md) +* [{{ls}} {{es}} output](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/ls-for-input.md b/manage-data/ingest/ingest-reference-architectures/ls-for-input.md index 554e883f6a..3b7c630077 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-for-input.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-for-input.md @@ -29,8 +29,8 @@ Info on {{ls}} and {{ls}} input and output plugins: * [{{ls}} plugin support matrix](https://www.elastic.co/support/matrix#logstash_plugins) * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{ls}} input plugins](https://www.elastic.co/guide/en/logstash/current/input-plugins.html) -* [{{es}} output plugin](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) +* [{{ls}} input plugins](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/input-plugins.md) +* [{{es}} output 
plugin](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) Info on {{es}} and ingest pipelines: diff --git a/manage-data/ingest/ingest-reference-architectures/ls-multi.md b/manage-data/ingest/ingest-reference-architectures/ls-multi.md index e3b92f27a9..a346577034 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-multi.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-multi.md @@ -57,13 +57,13 @@ output { Info on configuring {{agent}}: * [Fleet and Elastic Agent Guide](https://www.elastic.co/guide/en/fleet/current) -* [Configuring outputs for {{agent}}](https://www.elastic.co/guide/en/fleet/current/elastic-agent-output-configuration.html) +* [Configuring outputs for {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/elastic-agent-output-configuration.md) Info on {{ls}} and {{ls}} outputs: * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{ls}} {{es}} output plugin](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) -* [{{ls}} output plugins](https://www.elastic.co/guide/en/logstash/current/output-plugins.html) +* [{{ls}} {{es}} output plugin](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) +* [{{ls}} output plugins](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/output-plugins.md) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md b/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md index 3ed9a053be..d44e40b938 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md @@ -24,12 +24,12 @@ Example Info on configuring {{agent}}: * [Fleet and Elastic Agent Guide](https://www.elastic.co/guide/en/fleet/current) -* [Configuring outputs for {{agent}}](https://www.elastic.co/guide/en/fleet/current/elastic-agent-output-configuration.html) +* [Configuring outputs for {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/elastic-agent-output-configuration.md) Info on {{ls}} and {{ls}} plugins: * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{es}} output plugin](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) +* [{{es}} output plugin](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/lspq.md b/manage-data/ingest/ingest-reference-architectures/lspq.md index 2816bc03c9..21b1cf037c 100644 --- a/manage-data/ingest/ingest-reference-architectures/lspq.md +++ b/manage-data/ingest/ingest-reference-architectures/lspq.md @@ -21,16 +21,16 @@ Use when Info on configuring {{agent}}: * [Fleet and Elastic Agent Guide](https://www.elastic.co/guide/en/fleet/current) -* [Configuring outputs for {{agent}}](https://www.elastic.co/guide/en/fleet/current/elastic-agent-output-configuration.html) +* [Configuring outputs for {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/elastic-agent-output-configuration.md) For info on {{ls}} plugins: -* [{{agent}} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) -* [{{es}} output plugin](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) +* 
[{{agent}} input](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-elastic_agent.md) +* [{{es}} output plugin](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-outputs-elasticsearch.md) For info on using {{ls}} for buffering and data resiliency, check out this section in the [Logstash Reference](https://www.elastic.co/guide/en/logstash/current): -* [{{ls}} Persistent Queues (PQ)](https://www.elastic.co/guide/en/logstash/current/persistent-queues.html) +* [{{ls}} Persistent Queues (PQ)](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/persistent-queues.md) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/use-case-arch.md b/manage-data/ingest/ingest-reference-architectures/use-case-arch.md index 0003026310..76443c1411 100644 --- a/manage-data/ingest/ingest-reference-architectures/use-case-arch.md +++ b/manage-data/ingest/ingest-reference-architectures/use-case-arch.md @@ -24,5 +24,5 @@ You can host {{es}} on your own hardware or send your data to {{es}} on {{ecloud | [*{{agent}} to {{ls}} to Elasticsearch*](agent-ls.md)

![Image showing {{agent}} to {{ls}} to {{es}}](../../../images/ingest-ea-ls-es.png "") | You need additional capabilities offered by {{ls}}:

* [**enrichment**](ls-enrich.md) between {{agent}} and {{es}}
* [**persistent queue (PQ) buffering**](lspq.md) to accommodate network issues and downstream unavailability
* [**proxying**](ls-networkbridge.md) in cases where {{agent}}s have network restrictions for connecting outside of the {{agent}} network
* data needs to be [**routed to multiple**](ls-multi.md) {{es}} clusters and other destinations depending on the content
| | [*{{agent}} to proxy to Elasticsearch*](agent-proxy.md)

![Image showing connections between {{agent}} and {{es}} using a proxy](../../../images/ingest-ea-proxy-es.png "") | Agents have [network restrictions](agent-proxy.md) that prevent connecting outside of the {{agent}} network. Note that [{{ls}} as proxy](ls-networkbridge.md) is one option.
| | [*{{agent}} to {{es}} with Kafka as middleware message queue*](agent-kafka-es.md)

![Image showing {{agent}} collecting data and using Kafka as a message queue en route to {{es}}](../../../images/ingest-ea-kafka.png "") | Kafka is your [middleware message queue](agent-kafka-es.md):

* [Kafka ES sink connector](agent-kafka-essink.md) to write from Kafka to {{es}}
* [{{ls}} to read from Kafka and route to {{es}}](agent-kafka-ls.md)
| -| [*{{ls}} to Elasticsearch*](ls-for-input.md)

![Image showing {{ls}} collecting data and sending to {{es}}](../../../images/ingest-ls-es.png "") | You need to collect data from a source that {{agent}} can’t read (such as databases, AWS Kinesis). Check out the [{{ls}} input plugins](https://www.elastic.co/guide/en/logstash/current/input-plugins.html).
| +| [*{{ls}} to Elasticsearch*](ls-for-input.md)

![Image showing {{ls}} collecting data and sending to {{es}}](../../../images/ingest-ls-es.png "") | You need to collect data from a source that {{agent}} can’t read (such as databases, AWS Kinesis). Check out the [{{ls}} input plugins](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/input-plugins.md).
| | [*Elastic air-gapped architectures*](airgapped-env.md)

![Image showing {{stack}} in an air-gapped environment](../../../images/ingest-ea-airgapped.png "") | You want to deploy {{agent}} and {{stack}} in an air-gapped environment (no access to outside networks)
| diff --git a/manage-data/ingest/ingesting-data-for-elastic-solutions.md b/manage-data/ingest/ingesting-data-for-elastic-solutions.md index 014b8bdd7f..18ba01447f 100644 --- a/manage-data/ingest/ingesting-data-for-elastic-solutions.md +++ b/manage-data/ingest/ingesting-data-for-elastic-solutions.md @@ -14,7 +14,7 @@ To use [Elastic Agent](https://www.elastic.co/guide/en/fleet/current) and [Elast 1. Create an [{{ecloud}}](https://www.elastic.co/cloud) deployment for your solution. If you don’t have an {{ecloud}} account, you can sign up for a [free trial](https://cloud.elastic.co/registration) to get started. 2. Add the [Elastic integration](https://docs.elastic.co/en/integrations) for your data source to the deployment. -3. [Install {{agent}}](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html) on the systems whose data you want to collect. +3. [Install {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md) on the systems whose data you want to collect. :::: @@ -38,7 +38,7 @@ To use [Elastic Agent](https://www.elastic.co/guide/en/fleet/current) and [Elast * [{{es}} document APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-document) * [{{es}} language clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html) * [Elastic web crawler](https://www.elastic.co/web-crawler) - * [Elastic connectors](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-connectors.html) + * [Elastic connectors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/search-connectors/index.md) @@ -49,7 +49,7 @@ With [Elastic Observability](https://www.elastic.co/observability), you can moni **Guides for popular Observability use cases** * [Monitor applications and systems with Elastic Observability](https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current/getting-started-observability.html) -* [Get started with logs and metrics](https://www.elastic.co/guide/en/observability/current/logs-metrics-get-started.html) +* [Get started with logs and metrics](/solutions/observability/infra-and-hosts/get-started-with-system-metrics.md) * [Step 1: Add the {{agent}} System integration](https://www.elastic.co/guide/en/observability/current/logs-metrics-get-started.html#add-system-integration) * [Step 2: Install and run {{agent}}](https://www.elastic.co/guide/en/observability/current/logs-metrics-get-started.html#add-agent-to-fleet) @@ -81,7 +81,7 @@ You can detect and respond to threats when you use [Elastic Security](https://ww * [Install {{agent}}](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html) * [Elastic Security integrations](https://www.elastic.co/integrations/data-integrations?solution=search) -* [Elastic Security documentation](https://www.elastic.co/guide/en/security/current/es-overview.html) +* [Elastic Security documentation](/solutions/security.md) ## Ingesting data for your own custom search solution [ingest-for-custom] diff --git a/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md b/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md index 1cf6cb6716..b569d3ea1d 100644 --- a/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md +++ 
b/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md @@ -122,7 +122,7 @@ If you have multiple servers with metrics data, repeat the following steps to co **About Metricbeat modules** -Metricbeat has [many modules](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html) available that collect common metrics. You can [configure additional modules](https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-metricbeat.html) as needed. For this example we’re using Metricbeat’s default configuration, which has the [System module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-system.html) enabled. The System module allows you to monitor servers with the default set of metrics: *cpu*, *load*, *memory*, *network*, *process*, *process_summary*, *socket_summary*, *filesystem*, *fsstat*, and *uptime*. +Metricbeat has [many modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-modules.md) available that collect common metrics. You can [configure additional modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configuration-metricbeat.md) as needed. For this example we’re using Metricbeat’s default configuration, which has the [System module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-module-system.md) enabled. The System module allows you to monitor servers with the default set of metrics: *cpu*, *load*, *memory*, *network*, *process*, *process_summary*, *socket_summary*, *filesystem*, *fsstat*, and *uptime*. **Load the Metricbeat Kibana dashboards** @@ -144,7 +144,7 @@ sudo ./metricbeat setup \ 1. Specify the Cloud ID of your {{ech}} or {{ece}} deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details. 2. Specify the username and password provided to you when creating the deployment. Make sure to keep the colon between the username and password. ::::{important} -Depending on variables including the installation location, environment and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of the metricbeat.yml. +Depending on variables including the installation location, environment and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of the metricbeat.yml. You might encounter similar permissions hurdles as you work through multiple sections of this document. These permission requirements are there for a good reason: they are a security safeguard to prevent unauthorized access and modification of key Elastic files. @@ -193,7 +193,7 @@ The next step is to configure Filebeat to send operational data to Logstash. As **Enable the Filebeat system module** -Filebeat has [many modules](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-modules.html) available that collect common log types. You can [configure additional modules](https://www.elastic.co/guide/en/beats/filebeat/current/configuration-filebeat-modules.html) as needed. For this example we’re using Filebeat’s [System module](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-module-system.html).
This module reads in the various system log files (with information including login successes or failures, sudo command usage, and other key usage details) based on the detected operating system. For this example, a Linux-based OS is used and Filebeat ingests logs from the */var/log/* folder. It’s important to verify that Filebeat is given permission to access your logs folder through standard file and folder permissions. +Filebeat has [many modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-modules.md) available that collect common log types. You can [configure additional modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/configuration-filebeat-modules.md) as needed. For this example we’re using Filebeat’s [System module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-module-system.md). This module reads in the various system log files (with information including login successes or failures, sudo command usage, and other key usage details) based on the detected operating system. For this example, a Linux-based OS is used and Filebeat ingests logs from the */var/log/* folder. It’s important to verify that Filebeat is given permission to access your logs folder through standard file and folder permissions. 1. Go to */filebeat-/modules.d/* where ** is the directory where Filebeat is installed. 2. Filebeat requires at least one fileset to be enabled. In file */filebeat-/modules.d/system.yml.disabled*, under both `syslog` and `auth` set `enabled` to `true`: @@ -245,7 +245,7 @@ Index setup finished. Loading dashboards (Kibana must be running and reachable) Loaded dashboards Setting up ML using setup --machine-learning is going to be removed in 8.0.0. Please use the ML app instead. -See more: https://www.elastic.co/guide/en/machine-learning/current/index.html +See more: /explore-analyze/machine-learning.md Loaded machine learning job configurations Loaded Ingest pipelines ``` @@ -295,7 +295,7 @@ Now the Filebeat and Metricbeat are set up, let’s configure a {{ls}} pipeline 1. {{ls}} listens for Beats input on the default port of 5044. Only one line is needed to do this. {{ls}} can handle input from many Beats of the same and also of varying types (Metricbeat, Filebeat, and others). 2. This sends output to the standard output, which displays through your command line interface. This plugin enables you to verify the data before you send it to {{es}}, in a later step. -3. Save the new *beats.conf* file in your Logstash folder. To learn more about the file format and options, check [{{ls}} Configuration Examples](https://www.elastic.co/guide/en/logstash/current/config-examples.html). +3. Save the new *beats.conf* file in your Logstash folder. To learn more about the file format and options, check [{{ls}} Configuration Examples](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/config-examples.md). ## Output {{ls}} data to stdout [ec-beats-logstash-stdout] @@ -437,7 +437,7 @@ In this section, you configure {{ls}} to send the Metricbeat and Filebeat data t ``` 1. Use the Cloud ID of your {{ech}} or {{ece}} deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details. - 2. the default usename is `elastic`. 
It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) for information on the writer role and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) documentation. + 2. The default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check the [Grant access to secured resources](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/feature-roles.md) for information on the writer role and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) documentation. Following are some additional details about the configuration file settings: @@ -529,9 +529,9 @@ In this section, you configure {{ls}} to send the Metricbeat and Filebeat data t ::::{note} In this guide, you manually launch each of the Elastic stack applications through the command line interface. In production, you may prefer to configure {{ls}}, Metricbeat, and Filebeat to run as System Services.
Check the following pages for the steps to configure each application to run as a service: -* [Running {{ls}} as a service on Debian or RPM](https://www.elastic.co/guide/en/logstash/current/running-logstash.html) -* [Metricbeat and systemd](https://www.elastic.co/guide/en/beats/metricbeat/current/running-with-systemd.html) -* [Start filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-starting.html) +* [Running {{ls}} as a service on Debian or RPM](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/running-logstash.md) +* [Metricbeat and systemd](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/running-with-systemd.md) +* [Start filebeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-starting.md) :::: diff --git a/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-relational-database-into-elasticsearch-service.md b/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-relational-database-into-elasticsearch-service.md index 46400a1403..5dceee337c 100644 --- a/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-relational-database-into-elasticsearch-service.md +++ b/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-relational-database-into-elasticsearch-service.md @@ -40,7 +40,7 @@ $$$ece-db-logstash-pipeline$$$ $$$ece-db-logstash-prerequisites$$$ -This guide explains how to ingest data from a relational database into {{ess}} through [{{ls}}](https://www.elastic.co/guide/en/logstash/current/introduction.html), using the Logstash [JDBC input plugin](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html). It demonstrates how Logstash can be used to efficiently copy records and to receive updates from a relational database, and then send them into {{es}} in an {{ech}} or {{ece}} deployment. +This guide explains how to ingest data from a relational database into {{ess}} through [{{ls}}](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md), using the Logstash [JDBC input plugin](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-jdbc.md). It demonstrates how Logstash can be used to efficiently copy records and to receive updates from a relational database, and then send them into {{es}} in an {{ech}} or {{ece}} deployment. The code and methods presented here have been tested with MySQL. They should work with other relational databases. @@ -330,7 +330,7 @@ In this section, we configure Logstash to send the MySQL data to Elasticsearch. ``` 1. Use the Cloud ID of your {{ech}} or {{ece}} deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details. - 2. the default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) for information on roles and API Keys. 
Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) documentation. + 2. the default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check [Configuring security in Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/secure-connection.md) for information on roles and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) documentation. Following are some additional details about the configuration file settings: diff --git a/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-nodejs-on-elasticsearch-service.md b/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-nodejs-on-elasticsearch-service.md index 81f0643e4e..6a7a5a8c27 100644 --- a/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-nodejs-on-elasticsearch-service.md +++ b/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-nodejs-on-elasticsearch-service.md @@ -168,7 +168,7 @@ async function run() { run().catch(console.log) ``` -When using the [client.index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html#_index) API, the request automatically creates the `game-of-thrones` index if it doesn’t already exist, as well as document IDs for each indexed document if they are not explicitly specified. +When using the [client.index](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/api-reference.md#_index) API, the request automatically creates the `game-of-thrones` index if it doesn’t already exist, as well as document IDs for each indexed document if they are not explicitly specified. ## Search and modify data [ec_search_and_modify_data] @@ -215,7 +215,7 @@ async function update() { update().catch(console.log) ``` -This [more comprehensive list of API examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html) includes bulk operations, checking the existence of documents, updating by query, deleting, scrolling, and SQL queries. To learn more, check the complete [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html). +This [more comprehensive list of API examples](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/examples.md) includes bulk operations, checking the existence of documents, updating by query, deleting, scrolling, and SQL queries. To learn more, check the complete [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html). 
## Switch to API key authentication [ec_switch_to_api_key_authentication] @@ -302,11 +302,11 @@ Security Connections ({{ech}} only) -: If your application connecting to {{ech}} runs under the Java security manager, you should at least disable the caching of positive hostname resolutions. To learn more, check the [Java API Client documentation](https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current/_others.html). +: If your application connecting to {{ech}} runs under the Java security manager, you should at least disable the caching of positive hostname resolutions. To learn more, check the [Java API Client documentation](asciidocalypse://docs/elasticsearch-java/docs/reference/elasticsearch/elasticsearch-client-java-api-client/_others.md). Schema : When the example code was run an index mapping was created automatically. The field types were selected by {{es}} based on the content seen when the first record was ingested, and updated as new fields appeared in the data. It would be more efficient to specify the fields and field types in advance to optimize performance. Refer to the Elastic Common Schema documentation and Field Type documentation when you are designing the schema for your production use cases. Ingest -: For more advanced scenarios, this [bulk ingestion](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/bulk_examples.html) reference gives an example of the `bulk` API that makes it possible to perform multiple operations in a single call. This bulk example also explicitly specifies document IDs. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. +: For more advanced scenarios, this [bulk ingestion](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/bulk_examples.md) reference gives an example of the `bulk` API that makes it possible to perform multiple operations in a single call. This bulk example also explicitly specifies document IDs. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. diff --git a/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-python-on-elasticsearch-service.md b/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-python-on-elasticsearch-service.md index 0349583259..1e57fb87e6 100644 --- a/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-python-on-elasticsearch-service.md +++ b/manage-data/ingest/ingesting-data-from-applications/ingest-data-with-python-on-elasticsearch-service.md @@ -293,7 +293,7 @@ es.get(index='lord-of-the-rings', id='2EkAzngB_pyHD3p65UMt') 'birthplace': 'The Shire'}} ``` -For frequently used API calls with the Python client, check [Examples](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/examples.html). +For frequently used API calls with the Python client, check [Examples](asciidocalypse://docs/elasticsearch-py/docs/reference/elasticsearch/elasticsearch-client-python-api/examples.md). ## Switch to API key authentication [ec_switch_to_api_key_authentication_2] @@ -368,5 +368,5 @@ Schema : When the example code is run, an index mapping is created automatically. The field types are selected by {{es}} based on the content seen when the first record was ingested, and updated as new fields appeared in the data. 
It would be more efficient to specify the fields and field types in advance to optimize performance. Refer to the Elastic Common Schema documentation and Field Type documentation when you design the schema for your production use cases. Ingest -: For more advanced scenarios, [Bulk helpers](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/client-helpers.html#bulk-helpers) gives examples for the `bulk` API that makes it possible to perform multiple operations in a single call. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. +: For more advanced scenarios, [Bulk helpers](asciidocalypse://docs/elasticsearch-py/docs/reference/elasticsearch/elasticsearch-client-python-api/client-helpers.md#bulk-helpers) gives examples for the `bulk` API that makes it possible to perform multiple operations in a single call. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. diff --git a/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-nodejs-web-application-using-filebeat.md b/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-nodejs-web-application-using-filebeat.md index 1b5c2e46d6..1b17ed3c72 100644 --- a/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-nodejs-web-application-using-filebeat.md +++ b/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-nodejs-web-application-using-filebeat.md @@ -49,7 +49,7 @@ $$$ece-node-logs-send-ess$$$ $$$ece-node-logs-view-kibana$$$ -This guide demonstrates how to ingest logs from a Node.js web application and deliver them securely into an {{ech}} or {{ece}} deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in Kibana as requests are made to the Node.js server. While Node.js is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html#_get_started). +This guide demonstrates how to ingest logs from a Node.js web application and deliver them securely into an {{ech}} or {{ece}} deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in Kibana as requests are made to the Node.js server. While Node.js is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](asciidocalypse://docs/ecs-logging/docs/reference/ecs/ecs-logging-overview/intro.md#_get_started). *Time required: 1.5 hours* @@ -71,7 +71,7 @@ For the three following packages, you can create a working directory to install npm install winston ``` -* The [Elastic Common Schema (ECS) formatter](https://www.elastic.co/guide/en/ecs-logging/nodejs/current/winston.html) for the Node.js winston logger - This plugin formats your Node.js logs into an ECS structured JSON format ideally suited for ingestion into Elasticsearch. 
To install the ECS winston logger, run the following command in your working directory so that the package is installed in the same location as the winston package: +* The [Elastic Common Schema (ECS) formatter](asciidocalypse://docs/ecs-logging-nodejs/docs/reference/ecs/ecs-logging-nodejs/winston.md) for the Node.js winston logger - This plugin formats your Node.js logs into an ECS structured JSON format ideally suited for ingestion into Elasticsearch. To install the ECS winston logger, run the following command in your working directory so that the package is installed in the same location as the winston package: ```sh npm install @elastic/ecs-winston-format @@ -347,7 +347,7 @@ For this example, Filebeat uses the following four decoding options. json.expand_keys: true ``` -To learn more about these settings, check [JSON input configuration options](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-log.html#filebeat-input-log-config-json) and [Decode JSON fields](https://www.elastic.co/guide/en/beats/filebeat/current/decode-json-fields.html) in the Filebeat Reference. +To learn more about these settings, check [JSON input configuration options](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-input-log.md#filebeat-input-log-config-json) and [Decode JSON fields](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/decode-json-fields.md) in the Filebeat Reference. Append the four JSON decoding options to the *Filebeat inputs* section of *filebeat.yml*, so that the section now looks like this: @@ -383,7 +383,7 @@ Filebeat comes with predefined assets for parsing, indexing, and visualizing you ``` ::::{important} -Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. +Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. :::: @@ -484,7 +484,7 @@ In this command: * The *-c* flag specifies the path to the Filebeat config file. ::::{note} -Just in case the command doesn’t work as expected, check the [Filebeat quick start](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. +Just in case the command doesn’t work as expected, check the [Filebeat quick start](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. :::: @@ -567,5 +567,5 @@ You can add titles to the visualizations, resize and position them as you like, 2. 
As your final step, remember to stop Filebeat, the Node.js web server, and the client. Enter *CTRL + C* in the terminal window for each application to stop them. -You now know how to monitor log files from a Node.js web application, deliver the log event data securely into an {{ech}} or {{ece}} deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about ingesting data. +You now know how to monitor log files from a Node.js web application, deliver the log event data securely into an {{ech}} or {{ece}} deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-overview.md) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about ingesting data. diff --git a/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-python-application-using-filebeat.md b/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-python-application-using-filebeat.md index 54b3f39fb8..bfd7a25f58 100644 --- a/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-python-application-using-filebeat.md +++ b/manage-data/ingest/ingesting-data-from-applications/ingest-logs-from-python-application-using-filebeat.md @@ -33,13 +33,13 @@ $$$ece-python-logs-send-ess$$$ $$$ece-python-logs-view-kibana$$$ -This guide demonstrates how to ingest logs from a Python application and deliver them securely into an Elasticsearch Service deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in {{kib}} as they occur. While Python is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html). +This guide demonstrates how to ingest logs from a Python application and deliver them securely into an Elasticsearch Service deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in {{kib}} as they occur. While Python is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](asciidocalypse://docs/ecs-logging/docs/reference/ecs/ecs-logging-overview/intro.md). *Time required: 1 hour* ## Prerequisites [ec_prerequisites_2] -To complete these steps you need to have [Python](https://www.python.org/) installed on your system as well as the [Elastic Common Schema (ECS) logger](https://www.elastic.co/guide/en/ecs-logging/python/current/installation.html) for the Python logging library. 
+To complete these steps you need to have [Python](https://www.python.org/) installed on your system as well as the [Elastic Common Schema (ECS) logger](asciidocalypse://docs/ecs-logging-python/docs/reference/ecs/ecs-logging-python/installation.md) for the Python logging library. To install *ecs-logging-python*, run: @@ -140,7 +140,7 @@ In this step, you’ll create a Python script that generates logs in JSON format Having your logs written in a JSON format with ECS fields allows for easy parsing and analysis, and for standardization with other applications. A standard, easily parsible format becomes increasingly important as the volume and type of data captured in your logs expands over time. - Together with the standard fields included for each log entry is an extra *http.request.body.content* field. This extra field is there just to give you some additional, interesting data to work with, and also to demonstrate how you can add optional fields to your log data. Check the [ECS Field Reference](https://www.elastic.co/guide/en/ecs/current/ecs-field-reference.html) for the full list of available fields. + Together with the standard fields included for each log entry is an extra *http.request.body.content* field. This extra field is there just to give you some additional, interesting data to work with, and also to demonstrate how you can add optional fields to your log data. Check the [ECS Field Reference](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-field-reference.md) for the full list of available fields. 2. Let’s give the Python script a test run. Open a terminal instance in the location where you saved *elvis.py* and run the following: @@ -226,7 +226,7 @@ For this example, Filebeat uses the following four decoding options. json.expand_keys: true ``` -To learn more about these settings, check [JSON input configuration options](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-log.html#filebeat-input-log-config-json) and [Decode JSON fields](https://www.elastic.co/guide/en/beats/filebeat/current/decode-json-fields.html) in the Filebeat Reference. +To learn more about these settings, check [JSON input configuration options](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-input-log.md#filebeat-input-log-config-json) and [Decode JSON fields](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/decode-json-fields.md) in the Filebeat Reference. Append the four JSON decoding options to the *Filebeat inputs* section of *filebeat.yml*, so that the section now looks like this: @@ -262,7 +262,7 @@ Filebeat comes with predefined assets for parsing, indexing, and visualizing you ``` ::::{important} -Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. +Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of filebeat.yml. 
You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. :::: @@ -368,7 +368,7 @@ In this command: * The *-c* flag specifies the path to the Filebeat config file. ::::{note} -Just in case the command doesn’t work as expected, check the [Filebeat quick start](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. +Just in case the command doesn’t work as expected, check the [Filebeat quick start](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. :::: @@ -446,5 +446,5 @@ You can add titles to the visualizations, resize and position them as you like, 2. As your final step, remember to stop Filebeat and the Python script. Enter *CTRL + C* in both your Filebeat terminal and in your `elvis.py` terminal. -You now know how to monitor log files from a Python application, deliver the log event data securely into an {{ech}} or {{ece}} deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about all about ingesting data. +You now know how to monitor log files from a Python application, deliver the log event data securely into an {{ech}} or {{ece}} deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-overview.md) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about ingesting data. diff --git a/manage-data/ingest/ingesting-timeseries-data.md b/manage-data/ingest/ingesting-timeseries-data.md index 2ca26671a6..0bba4d84f7 100644 --- a/manage-data/ingest/ingesting-timeseries-data.md +++ b/manage-data/ingest/ingesting-timeseries-data.md @@ -9,7 +9,7 @@ Elastic and others offer tools to help you get your data from the original data In this section, we’ll help you determine which option is best for you. -* [{{agent}} and Elastic integrations](https://www.elastic.co/guide/en/ingest-overview/current/ingest-tools.html#ingest-ea) +* [{{agent}} and Elastic integrations](/manage-data/ingest/ingesting-timeseries-data.md#ingest-ea) * [{{beats}}](https://www.elastic.co/guide/en/ingest-overview/current/ingest-tools.html#ingest-beats) * [OpenTelemetry (OTel) collectors](https://www.elastic.co/guide/en/ingest-overview/current/ingest-tools.html#ingest-otel) * [Logstash](https://www.elastic.co/guide/en/ingest-overview/current/ingest-tools.html#ingest-logstash) @@ -17,22 +17,22 @@
## {{agent}} and Elastic integrations [ingest-ea] -A single [{{agent}}](https://www.elastic.co/guide/en/fleet/current) can collect multiple types of data when it is [installed](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html) on a host computer. You can use standalone {{agent}}s and manage them locally on the systems where they are installed, or you can manage all of your agents and policies with the [Fleet UI in {{kib}}](https://www.elastic.co/guide/en/fleet/current/manage-agents-in-fleet.html). +A single [{{agent}}](https://www.elastic.co/guide/en/fleet/current) can collect multiple types of data when it is [installed](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md) on a host computer. You can use standalone {{agent}}s and manage them locally on the systems where they are installed, or you can manage all of your agents and policies with the [Fleet UI in {{kib}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/manage-elastic-agents-in-fleet.md). Use {{agent}} with one of hundreds of [Elastic integrations](https://docs.elastic.co/en/integrations) to simplify collecting, transforming, and visualizing data. Integrations include default ingestion rules, dashboards, and visualizations to help you start analyzing your data right away. Check out the [Integration quick reference](https://docs.elastic.co/en/integrations/all_integrations) to search for available integrations that can reduce your time to value. -{{agent}} is the best option for collecting timestamped data for most data sources and use cases. If your data requires additional processing before going to {{es}}, you can use [{{agent}} processors](https://www.elastic.co/guide/en/fleet/current/elastic-agent-processor-configuration.html), [{{ls}}](https://www.elastic.co/guide/en/logstash/current), or additional processing features in {{es}}. Check out [additional processing](https://www.elastic.co/guide/en/ingest-overview/current/ingest-addl-proc.html) to see options. +{{agent}} is the best option for collecting timestamped data for most data sources and use cases. If your data requires additional processing before going to {{es}}, you can use [{{agent}} processors](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/agent-processors.md), [{{ls}}](https://www.elastic.co/guide/en/logstash/current), or additional processing features in {{es}}. Check out [additional processing](/manage-data/ingest/transform-enrich.md) to see options. Ready to try [{{agent}}](https://www.elastic.co/guide/en/fleet/current)? Check out the [installation instructions](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html). ## {{beats}} [ingest-beats] -[Beats](https://www.elastic.co/guide/en/beats/libbeat/current/beats-reference.html) are the original Elastic lightweight data shippers, and their capabilities live on in Elastic Agent. When you use Elastic Agent, you’re getting core Beats functionality, but with more added features. +[Beats](asciidocalypse://docs/beats/docs/reference/ingestion-tools/index.md) are the original Elastic lightweight data shippers, and their capabilities live on in Elastic Agent. When you use Elastic Agent, you’re getting core Beats functionality, but with more added features. Beats require that you install a separate Beat for each type of data you want to collect. A single Elastic Agent installed on a host can collect and transport multiple types of data. 
-**Best practice:** Use [{{agent}}](https://www.elastic.co/guide/en/fleet/current) whenever possible. If your data source is not yet supported by {{agent}}, use {{beats}}. Check out the {{beats}} and {{agent}} [comparison](https://www.elastic.co/guide/en/fleet/current/beats-agent-comparison.html#additional-capabilities-beats-and-agent) for more info. When you are ready to upgrade, check out [Migrate from {{beats}} to {{agent}}](https://www.elastic.co/guide/en/fleet/current/migrate-beats-to-agent.html). +**Best practice:** Use [{{agent}}](https://www.elastic.co/guide/en/fleet/current) whenever possible. If your data source is not yet supported by {{agent}}, use {{beats}}. Check out the {{beats}} and {{agent}} [comparison](/manage-data/ingest/tools.md#additional-capabilities-beats-and-agent) for more info. When you are ready to upgrade, check out [Migrate from {{beats}} to {{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/migrate-from-beats-to-elastic-agent.md). ## OpenTelemetry (OTel) collectors [ingest-otel] @@ -44,10 +44,10 @@ In addition to supporting upstream OTel development, Elastic provides [Elastic D ## Logstash [ingest-logstash] -[{{ls}}](https://www.elastic.co/guide/en/logstash/current) is a versatile open source data ETL (extract, transform, load) engine that can expand your ingest capabilities. {{ls}} can *collect data* from a wide variety of data sources with {{ls}} [input plugins](https://www.elastic.co/guide/en/logstash/current/input-plugins.html), *enrich and transform* the data with {{ls}} [filter plugins](https://www.elastic.co/guide/en/logstash/current/filter-plugins.html), and *output* the data to {{es}} and other destinations with the {{ls}} [output plugins](https://www.elastic.co/guide/en/logstash/current/output-plugins.html). +[{{ls}}](https://www.elastic.co/guide/en/logstash/current) is a versatile open source data ETL (extract, transform, load) engine that can expand your ingest capabilities. {{ls}} can *collect data* from a wide variety of data sources with {{ls}} [input plugins](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/input-plugins.md), *enrich and transform* the data with {{ls}} [filter plugins](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/filter-plugins.md), and *output* the data to {{es}} and other destinations with the {{ls}} [output plugins](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/output-plugins.md). Many users never need to use {{ls}}, but it’s available if you need it for: * **Data collection** (if an Elastic integration isn’t available). {{agent}} and Elastic [integrations](https://docs.elastic.co/en/integrations/all_integrations) provide many features out-of-the-box, so be sure to search or browse integrations for your data source. If you don’t find an Elastic integration for your data source, check {{ls}} for an [input plugin](https://www.elastic.co/guide/en/logstash/current/input-plugins.html) for your data source. -* **Additional processing.** One of the most common {{ls}} use cases is [extending Elastic integrations](https://www.elastic.co/guide/en/logstash/current/ea-integrations.html). You can take advantage of the extensive, built-in capabilities of Elastic Agent and Elastic Integrations, and then use {{ls}} for additional data processing before sending the data on to {{es}}. 
-* **Advanced use cases.** {{ls}} can help with advanced use cases, such as when you need [persistence or buffering](https://www.elastic.co/guide/en/ingest/current/lspq.html), additional [data enrichment](https://www.elastic.co/guide/en/ingest/current/ls-enrich.html), [proxying](https://www.elastic.co/guide/en/ingest/current/ls-networkbridge.html) as a way to bridge network connections, or the ability to route data to [multiple destinations](https://www.elastic.co/guide/en/ingest/current/ls-multi.html). +* **Additional processing.** One of the most common {{ls}} use cases is [extending Elastic integrations](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/using-logstash-with-elastic-integrations.md). You can take advantage of the extensive, built-in capabilities of Elastic Agent and Elastic Integrations, and then use {{ls}} for additional data processing before sending the data on to {{es}}. +* **Advanced use cases.** {{ls}} can help with advanced use cases, such as when you need [persistence or buffering](/manage-data/ingest/ingest-reference-architectures/lspq.md), additional [data enrichment](/manage-data/ingest/ingest-reference-architectures/ls-enrich.md), [proxying](/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md) as a way to bridge network connections, or the ability to route data to [multiple destinations](/manage-data/ingest/ingest-reference-architectures/ls-multi.md). diff --git a/manage-data/ingest/transform-enrich.md b/manage-data/ingest/transform-enrich.md index 70de740bbe..c4c597a2e4 100644 --- a/manage-data/ingest/transform-enrich.md +++ b/manage-data/ingest/transform-enrich.md @@ -16,7 +16,7 @@ According to your use case, you may want to control the structure of your ingest Finally, to help ensure optimal query results, you may want to customize how text is analyzed and how text fields are defined inside {{es}}. {{agent}} processors -: You can use [{{agent}} processors](https://www.elastic.co/guide/en/fleet/current/elastic-agent-processor-configuration.html) to sanitize or enrich raw data at the source. Use {{agent}} processors if you need to control what data is sent across the wire, or if you need to enrich the raw data with information available on the host. +: You can use [{{agent}} processors](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/agent-processors.md) to sanitize or enrich raw data at the source. Use {{agent}} processors if you need to control what data is sent across the wire, or if you need to enrich the raw data with information available on the host. {{es}} ingest pipelines : You can use [{{es}} ingest pipelines](transform-enrich/ingest-pipelines.md) to enrich incoming data or normalize field data before the data is indexed. {{es}} ingest pipelines enable you to manipulate the data as it comes in. This approach helps you avoid adding processing overhead to the hosts from which you’re collecting data. @@ -33,7 +33,7 @@ Finally, to help ensure optimal query results, you may want to customize how tex {{ls}} and the {{ls}} `elastic_integration filter` : If you're using {{ls}} as your primary ingest tool, you can take advantage of its built-in pipeline capabilities to transform your data. You configure a pipeline by stringing together a series of input, output, filtering, and optional codec plugins to manipulate all incoming data. 
-: If you're ingesting using {{agent}} with Elastic {{integrations}}, you can use the {{ls}} [`elastic_integration filter`](https://www.elastic.co/guide/en/logstash/current/) and other [{{ls}} filters](https://www.elastic.co/guide/en/logstash/current/filter-plugins.html) to [extend Elastic integrations](https://www.elastic.co/guide/en/logstash/current/ea-integrations.html) by transforming data before it goes to {{es}}. +: If you're ingesting using {{agent}} with Elastic {{integrations}}, you can use the {{ls}} [`elastic_integration filter`](https://www.elastic.co/guide/en/logstash/current/) and other [{{ls}} filters](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/filter-plugins.md) to [extend Elastic integrations](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/using-logstash-with-elastic-integrations.md) by transforming data before it goes to {{es}}. Index mapping : Index mapping lets you control the structure that incoming data has within an {{es}} index. You can define all of the fields that are included in the index and their respective data types. For example, you can set fields for dates, numbers, or geolocations, and define the fields to have specific formats. diff --git a/manage-data/ingest/transform-enrich/example-enrich-data-based-on-exact-values.md b/manage-data/ingest/transform-enrich/example-enrich-data-based-on-exact-values.md index 585eac0f2d..6b5c84b6ce 100644 --- a/manage-data/ingest/transform-enrich/example-enrich-data-based-on-exact-values.md +++ b/manage-data/ingest/transform-enrich/example-enrich-data-based-on-exact-values.md @@ -5,7 +5,7 @@ mapped_pages: # Example: Enrich your data based on exact values [match-enrich-policy-type] -`match` [enrich policies](data-enrichment.md#enrich-policy) match enrich data to incoming documents based on an exact value, such as a email address or ID, using a [`term` query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html). +`match` [enrich policies](data-enrichment.md#enrich-policy) match enrich data to incoming documents based on an exact value, such as an email address or ID, using a [`term` query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-term-query.md). The following example creates a `match` enrich policy that adds user name and contact information to incoming documents based on an email address. It then adds the `match` enrich policy to a processor in an ingest pipeline. @@ -50,7 +50,7 @@ Use the [execute enrich policy API](https://www.elastic.co/docs/api/doc/elastics POST /_enrich/policy/users-policy/_execute?wait_for_completion=false ``` -Use the [create or update pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) to create an ingest pipeline. In the pipeline, add an [enrich processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-processor.html) that includes: +Use the [create or update pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) to create an ingest pipeline. In the pipeline, add an [enrich processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/enrich-processor.md) that includes: * Your enrich policy. * The `field` of incoming documents used to match documents from the enrich index.
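The steps this page describes condense into a short sketch. The `users-policy` name matches the execute call above; the `users` source index, the enrich fields, and the `user_lookup` pipeline name are illustrative assumptions rather than part of the page's full example:

```console
PUT /_enrich/policy/users-policy
{
  "match": {
    "indices": "users",
    "match_field": "email",
    "enrich_fields": ["first_name", "last_name", "city"]
  }
}

POST /_enrich/policy/users-policy/_execute?wait_for_completion=false

PUT /_ingest/pipeline/user_lookup
{
  "processors": [
    {
      "enrich": {
        "policy_name": "users-policy",
        "field": "email",
        "target_field": "user"
      }
    }
  ]
}
```

Documents indexed through `user_lookup` whose `email` matches a document in the enrich index get the configured enrich fields appended under `user`.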
diff --git a/manage-data/ingest/transform-enrich/example-enrich-data-based-on-geolocation.md b/manage-data/ingest/transform-enrich/example-enrich-data-based-on-geolocation.md index ddffd23331..51c8cc7d42 100644 --- a/manage-data/ingest/transform-enrich/example-enrich-data-based-on-geolocation.md +++ b/manage-data/ingest/transform-enrich/example-enrich-data-based-on-geolocation.md @@ -5,7 +5,7 @@ mapped_pages: # Example: Enrich your data based on geolocation [geo-match-enrich-policy-type] -`geo_match` [enrich policies](data-enrichment.md#enrich-policy) match enrich data to incoming documents based on a geographic location, using a [`geo_shape` query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-shape-query.html). +`geo_match` [enrich policies](data-enrichment.md#enrich-policy) match enrich data to incoming documents based on a geographic location, using a [`geo_shape` query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-geo-shape-query.md). The following example creates a `geo_match` enrich policy that adds postal codes to incoming documents based on a set of coordinates. It then adds the `geo_match` enrich policy to a processor in an ingest pipeline. @@ -63,12 +63,12 @@ Use the [execute enrich policy API](https://www.elastic.co/docs/api/doc/elastics POST /_enrich/policy/postal_policy/_execute?wait_for_completion=false ``` -Use the [create or update pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) to create an ingest pipeline. In the pipeline, add an [enrich processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-processor.html) that includes: +Use the [create or update pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) to create an ingest pipeline. In the pipeline, add an [enrich processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/enrich-processor.md) that includes: * Your enrich policy. * The `field` of incoming documents used to match the geoshape of documents from the enrich index. * The `target_field` used to store appended enrich data for incoming documents. This field contains the `match_field` and `enrich_fields` specified in your enrich policy. -* The `shape_relation`, which indicates how the processor matches geoshapes in incoming documents to geoshapes in documents from the enrich index. See [Spatial Relations](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-shape-query.html#_spatial_relations) for valid options and more information. +* The `shape_relation`, which indicates how the processor matches geoshapes in incoming documents to geoshapes in documents from the enrich index. See [Spatial Relations](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-shape-query.md#_spatial_relations) for valid options and more information. 
```console PUT /_ingest/pipeline/postal_lookup diff --git a/manage-data/ingest/transform-enrich/example-enrich-data-by-matching-value-to-range.md b/manage-data/ingest/transform-enrich/example-enrich-data-by-matching-value-to-range.md index b7d2d6d540..09db701a15 100644 --- a/manage-data/ingest/transform-enrich/example-enrich-data-by-matching-value-to-range.md +++ b/manage-data/ingest/transform-enrich/example-enrich-data-by-matching-value-to-range.md @@ -5,7 +5,7 @@ mapped_pages: # Example: Enrich your data by matching a value to a range [range-enrich-policy-type] -A `range` [enrich policy](data-enrichment.md#enrich-policy) uses a [`term` query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html) to match a number, date, or IP address in incoming documents to a range of the same type in the enrich index. Matching a range to a range is not supported. +A `range` [enrich policy](data-enrichment.md#enrich-policy) uses a [`term` query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-term-query.md) to match a number, date, or IP address in incoming documents to a range of the same type in the enrich index. Matching a range to a range is not supported. The following example creates a `range` enrich policy that adds a descriptive network name and responsible department to incoming documents based on an IP address. It then adds the enrich policy to a processor in an ingest pipeline. @@ -60,7 +60,7 @@ Use the [execute enrich policy API](https://www.elastic.co/docs/api/doc/elastics POST /_enrich/policy/networks-policy/_execute?wait_for_completion=false ``` -Use the [create or update pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) to create an ingest pipeline. In the pipeline, add an [enrich processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-processor.html) that includes: +Use the [create or update pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) to create an ingest pipeline. In the pipeline, add an [enrich processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/enrich-processor.md) that includes: * Your enrich policy. * The `field` of incoming documents used to match documents from the enrich index. diff --git a/manage-data/ingest/transform-enrich/example-parse-logs.md b/manage-data/ingest/transform-enrich/example-parse-logs.md index 6b2f4d08c6..30226559a1 100644 --- a/manage-data/ingest/transform-enrich/example-parse-logs.md +++ b/manage-data/ingest/transform-enrich/example-parse-logs.md @@ -28,7 +28,7 @@ These logs contain a timestamp, IP address, and user agent. You want to give the 2. Click **Create pipeline > New pipeline**. 3. Set **Name** to `my-pipeline` and optionally add a description for the pipeline. -4. Add a [grok processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html) to parse the log message: +4. Add a [grok processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/grok-processor.md) to parse the log message: 1. Click **Add a processor** and select the **Grok** processor type. 2. Set **Field** to `message` and **Patterns** to the following [grok pattern](../../../explore-analyze/scripting/grok.md): @@ -44,9 +44,9 @@ These logs contain a timestamp, IP address, and user agent. 
You want to give the | Processor type | Field | Additional options | Description | | --- | --- | --- | --- | - | [**Date**](https://www.elastic.co/guide/en/elasticsearch/reference/current/date-processor.html) | `@timestamp` | **Formats**: `dd/MMM/yyyy:HH:mm:ss Z` | `Format '@timestamp' as 'dd/MMM/yyyy:HH:mm:ss Z'` | - | [**GeoIP**](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html) | `source.ip` | **Target field**: `source.geo` | `Add 'source.geo' GeoIP data for 'source.ip'` | - | [**User agent**](https://www.elastic.co/guide/en/elasticsearch/reference/current/user-agent-processor.html) | `user_agent` | | `Extract fields from 'user_agent'` | + | [**Date**](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/date-processor.md) | `@timestamp` | **Formats**: `dd/MMM/yyyy:HH:mm:ss Z` | `Format '@timestamp' as 'dd/MMM/yyyy:HH:mm:ss Z'` | + | [**GeoIP**](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/geoip-processor.md) | `source.ip` | **Target field**: `source.geo` | `Add 'source.geo' GeoIP data for 'source.ip'` | + | [**User agent**](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/user-agent-processor.md) | `user_agent` | | `Extract fields from 'user_agent'` | Your form should look similar to this: @@ -132,7 +132,7 @@ These logs contain a timestamp, IP address, and user agent. You want to give the } ``` -12. To verify, search the data stream to retrieve the document. The following search uses [`filter_path`](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#common-options-response-filtering) to return only the [document source](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html). +12. To verify, search the data stream to retrieve the document. The following search uses [`filter_path`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/common-options.md#common-options-response-filtering) to return only the [document source](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md). ```console GET my-data-stream/_search?filter_path=hits.hits._source diff --git a/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md b/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md index 3f25cd2fdf..85ac848d0f 100644 --- a/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md +++ b/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md @@ -30,7 +30,7 @@ In **{{project-settings}} → {{manage-app}} → {{ingest-pipelines-app}}**, you To create a pipeline, click **Create pipeline → New pipeline**. For an example tutorial, see [Example: Parse logs](example-parse-logs.md). -The **New pipeline from CSV** option lets you use a file with comma-separated values (CSV) to create an ingest pipeline that maps custom data to the Elastic Common Schema (ECS). Mapping your custom data to ECS makes the data easier to search and lets you reuse visualizations from other data sets. To get started, check [Map custom data to ECS](https://www.elastic.co/guide/en/ecs/current/ecs-converting.html). +The **New pipeline from CSV** option lets you use a file with comma-separated values (CSV) to create an ingest pipeline that maps custom data to the Elastic Common Schema (ECS). Mapping your custom data to ECS makes the data easier to search and lets you reuse visualizations from other data sets. 
To get started, check [Map custom data to ECS](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-converting.md). ## Test pipelines [ingest-pipelines-test-pipelines] diff --git a/manage-data/ingest/transform-enrich/ingest-pipelines.md b/manage-data/ingest/transform-enrich/ingest-pipelines.md index 0afcb763bf..36f5d23c6d 100644 --- a/manage-data/ingest/transform-enrich/ingest-pipelines.md +++ b/manage-data/ingest/transform-enrich/ingest-pipelines.md @@ -7,7 +7,7 @@ mapped_urls: {{es}} ingest pipelines let you perform common transformations on your data before indexing. For example, you can use pipelines to remove fields, extract values from text, and enrich your data. -A pipeline consists of a series of configurable tasks called [processors](https://www.elastic.co/guide/en/elasticsearch/reference/current/processors.html). Each processor runs sequentially, making specific changes to incoming documents. After the processors have run, {{es}} adds the transformed documents to your data stream or index. +A pipeline consists of a series of configurable tasks called [processors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/index.md). Each processor runs sequentially, making specific changes to incoming documents. After the processors have run, {{es}} adds the transformed documents to your data stream or index. :::{image} ../../../images/elasticsearch-reference-ingest-process.svg :alt: Ingest pipeline diagram @@ -42,11 +42,11 @@ In {{kib}}, open the main menu and click **Stack Management > Ingest Pipelines** To create a pipeline, click **Create pipeline > New pipeline**. For an example tutorial, see [Example: Parse logs](example-parse-logs.md). ::::{tip} -The **New pipeline from CSV** option lets you use a CSV to create an ingest pipeline that maps custom data to the [Elastic Common Schema (ECS)](https://www.elastic.co/guide/en/ecs/current). Mapping your custom data to ECS makes the data easier to search and lets you reuse visualizations from other datasets. To get started, check [Map custom data to ECS](https://www.elastic.co/guide/en/ecs/current/ecs-converting.html). +The **New pipeline from CSV** option lets you use a CSV to create an ingest pipeline that maps custom data to the [Elastic Common Schema (ECS)](https://www.elastic.co/guide/en/ecs/current). Mapping your custom data to ECS makes the data easier to search and lets you reuse visualizations from other datasets. To get started, check [Map custom data to ECS](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-converting.md). :::: -You can also use the [ingest APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ingest) to create and manage pipelines. The following [create pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) request creates a pipeline containing two [`set`](https://www.elastic.co/guide/en/elasticsearch/reference/current/set-processor.html) processors followed by a [`lowercase`](https://www.elastic.co/guide/en/elasticsearch/reference/current/lowercase-processor.html) processor. The processors run sequentially in the order specified. +You can also use the [ingest APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ingest) to create and manage pipelines. 
The following [create pipeline API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-pipeline) request creates a pipeline containing two [`set`](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/set-processor.md) processors followed by a [`lowercase`](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/lowercase-processor.md) processor. The processors run sequentially in the order specified. ```console PUT _ingest/pipeline/my-pipeline @@ -225,7 +225,7 @@ POST _reindex ## Set a default pipeline [set-default-pipeline] -Use the [`index.default_pipeline`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-default-pipeline) index setting to set a default pipeline. {{es}} applies this pipeline to indexing requests if no `pipeline` parameter is specified. +Use the [`index.default_pipeline`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-default-pipeline) index setting to set a default pipeline. {{es}} applies this pipeline to indexing requests if no `pipeline` parameter is specified. ## Set a final pipeline [set-final-pipeline] @@ -246,9 +246,9 @@ output.elasticsearch: ## Pipelines for {{fleet}} and {{agent}} [pipelines-for-fleet-elastic-agent] -{{agent}} integrations ship with default ingest pipelines that preprocess and enrich data before indexing. [{{fleet}}](https://www.elastic.co/guide/en/fleet/current/index.html) applies these pipelines using [index templates](../../data-store/templates.md) that include [pipeline index settings](ingest-pipelines.md#set-default-pipeline). {{es}} matches these templates to your {{fleet}} data streams based on the [stream’s naming scheme](https://www.elastic.co/guide/en/fleet/current/data-streams.html#data-streams-naming-scheme). +{{agent}} integrations ship with default ingest pipelines that preprocess and enrich data before indexing. [{{fleet}}](https://www.elastic.co/guide/en/fleet/current/index.html) applies these pipelines using [index templates](../../data-store/templates.md) that include [pipeline index settings](ingest-pipelines.md#set-default-pipeline). {{es}} matches these templates to your {{fleet}} data streams based on the [stream’s naming scheme](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/data-streams.md#data-streams-naming-scheme). -Each default integration pipeline calls a nonexistent, unversioned `*@custom` ingest pipeline. If unaltered, this pipeline call has no effect on your data. However, you can modify this call to create custom pipelines for integrations that persist across upgrades. Refer to [Tutorial: Transform data with custom ingest pipelines](https://www.elastic.co/guide/en/fleet/current/data-streams-pipeline-tutorial.html) to learn more. +Each default integration pipeline calls a nonexistent, unversioned `*@custom` ingest pipeline. If unaltered, this pipeline call has no effect on your data. However, you can modify this call to create custom pipelines for integrations that persist across upgrades. Refer to [Tutorial: Transform data with custom ingest pipelines](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/data-streams-pipeline-tutorial.md) to learn more. 
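Activating the `*@custom` hook described above takes nothing more than creating the pipeline. A minimal sketch follows; the `logs-my_integration.access@custom` name is hypothetical, so substitute the unversioned `@custom` name that your integration's default pipeline actually calls:

```console
PUT _ingest/pipeline/logs-my_integration.access@custom
{
  "processors": [
    {
      "set": {
        "field": "organization.name",
        "value": "example-org"
      }
    }
  ]
}
```

Because the default integration pipeline already calls this name, the `set` processor starts running on incoming documents as soon as the pipeline exists, and the customization persists across integration upgrades.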
{{fleet}} doesn’t provide a default ingest pipeline for the **Custom logs** integration, but you can specify a pipeline for this integration using an [index template](ingest-pipelines.md#pipeline-custom-logs-index-template) or a [custom configuration](ingest-pipelines.md#pipeline-custom-logs-configuration). @@ -342,12 +342,12 @@ $$$pipeline-custom-logs-configuration$$$ **{{agent}} standalone** -If you run {{agent}} standalone, you can apply pipelines using an [index template](../../data-store/templates.md) that includes the [`index.default_pipeline`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-default-pipeline) or [`index.final_pipeline`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-final-pipeline) index setting. Alternatively, you can specify the `pipeline` policy setting in your `elastic-agent.yml` configuration. See [Install standalone {{agent}}s](https://www.elastic.co/guide/en/fleet/current/install-standalone-elastic-agent.html). +If you run {{agent}} standalone, you can apply pipelines using an [index template](../../data-store/templates.md) that includes the [`index.default_pipeline`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-default-pipeline) or [`index.final_pipeline`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-final-pipeline) index setting. Alternatively, you can specify the `pipeline` policy setting in your `elastic-agent.yml` configuration. See [Install standalone {{agent}}s](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-standalone-elastic-agent.md). ## Pipelines for search indices [pipelines-in-enterprise-search] -When you create Elasticsearch indices for search use cases, for example, using the [web crawler^](https://www.elastic.co/guide/en/enterprise-search/current/crawler.html) or [connectors](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-connectors.html), these indices are automatically set up with specific ingest pipelines. These processors help optimize your content for search. See [*Ingest pipelines in Search*](../../../solutions/search/ingest-for-search.md) for more information. +When you create Elasticsearch indices for search use cases, for example, using the [web crawler^](https://www.elastic.co/guide/en/enterprise-search/current/crawler.html) or [connectors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/search-connectors/index.md), these indices are automatically set up with specific ingest pipelines. These processors help optimize your content for search. See [*Ingest pipelines in Search*](../../../solutions/search/ingest-for-search.md) for more information. ## Access source fields in a processor [access-source-fields] @@ -387,7 +387,7 @@ PUT _ingest/pipeline/my-pipeline Use dot notation to access object fields. ::::{important} -If your document contains flattened objects, use the [`dot_expander`](https://www.elastic.co/guide/en/elasticsearch/reference/current/dot-expand-processor.html) processor to expand them first. Other ingest processors cannot access flattened objects. +If your document contains flattened objects, use the [`dot_expander`](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/dot-expand-processor.md) processor to expand them first. Other ingest processors cannot access flattened objects. 
:::: @@ -633,10 +633,10 @@ PUT _ingest/pipeline/my-pipeline ## Conditionally run a processor [conditionally-run-processor] -Each processor supports an optional `if` condition, written as a [Painless script](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-guide.html). If provided, the processor only runs when the `if` condition is `true`. +Each processor supports an optional `if` condition, written as a [Painless script](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless.md). If provided, the processor only runs when the `if` condition is `true`. ::::{important} -`if` condition scripts run in Painless’s [ingest processor context](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-ingest-processor-context.html). In `if` conditions, `ctx` values are read-only. +`if` condition scripts run in Painless’s [ingest processor context](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-ingest-processor-context.md). In `if` conditions, `ctx` values are read-only. :::: @@ -654,7 +654,7 @@ PUT _ingest/pipeline/my-pipeline } ``` -If the [`script.painless.regex.enabled`](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html#script-painless-regex-enabled) cluster setting is enabled, you can use regular expressions in your `if` condition scripts. For supported syntax, see [Painless regular expressions](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-regexes.html). +If the [`script.painless.regex.enabled`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/circuit-breaker-settings.md#script-painless-regex-enabled) cluster setting is enabled, you can use regular expressions in your `if` condition scripts. For supported syntax, see [Painless regular expressions](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-regexes.md). ::::{tip} If possible, avoid using regular expressions. Expensive regular expressions can slow indexing speeds. @@ -742,7 +742,7 @@ PUT _ingest/pipeline/my-pipeline } ``` -Incoming documents often contain object fields. If a processor script attempts to access a field whose parent object does not exist, {{es}} returns a `NullPointerException`. To avoid these exceptions, use [null safe operators](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-operators-reference.html#null-safe-operator), such as `?.`, and write your scripts to be null safe. +Incoming documents often contain object fields. If a processor script attempts to access a field whose parent object does not exist, {{es}} returns a `NullPointerException`. To avoid these exceptions, use [null safe operators](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/painless-operators-reference.md#null-safe-operator), such as `?.`, and write your scripts to be null safe. For example, `ctx.network?.name.equalsIgnoreCase('Guest')` is not null safe. `ctx.network?.name` can return null. Rewrite the script as `'Guest'.equalsIgnoreCase(ctx.network?.name)`, which is null safe because `Guest` is always non-null. @@ -765,7 +765,7 @@ PUT _ingest/pipeline/my-pipeline ## Conditionally apply pipelines [conditionally-apply-pipelines] -Combine an `if` condition with the [`pipeline`](https://www.elastic.co/guide/en/elasticsearch/reference/current/pipeline-processor.html) processor to apply other pipelines to documents based on your criteria. 
You can use this pipeline as the [default pipeline](ingest-pipelines.md#set-default-pipeline) in an [index template](../../data-store/templates.md) used to configure multiple data streams or indices. +Combine an `if` condition with the [`pipeline`](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/pipeline-processor.md) processor to apply other pipelines to documents based on your criteria. You can use this pipeline as the [default pipeline](ingest-pipelines.md#set-default-pipeline) in an [index template](../../data-store/templates.md) used to configure multiple data streams or indices. ```console PUT _ingest/pipeline/one-pipeline-to-rule-them-all diff --git a/manage-data/ingest/transform-enrich/logstash-pipelines.md b/manage-data/ingest/transform-enrich/logstash-pipelines.md index 1698d78e0d..3045b13af9 100644 --- a/manage-data/ingest/transform-enrich/logstash-pipelines.md +++ b/manage-data/ingest/transform-enrich/logstash-pipelines.md @@ -25,7 +25,7 @@ After you configure {{ls}} to use centralized pipeline management, you can no lo ## Manage pipelines [logstash-pipelines-manage-pipelines] -1. [Configure centralized pipeline management](https://www.elastic.co/guide/en/logstash/current/configuring-centralized-pipelines.html). +1. [Configure centralized pipeline management](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/configuring-centralized-pipelines.md). 2. To add a new pipeline, go to **{{project-settings}} → {{manage-app}} → {{ls-pipelines-app}}** and click **Create pipeline**. Provide the following details, then click **Create and deploy**. Pipeline ID @@ -58,4 +58,4 @@ After you configure {{ls}} to use centralized pipeline management, you can no lo To delete one or more pipelines, select their checkboxes then click **Delete**. -For more information about pipeline behavior, go to [Centralized Pipeline Management](https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html#_pipeline_behavior). +For more information about pipeline behavior, go to [Centralized Pipeline Management](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/logstash-centralized-pipeline-management.md#_pipeline_behavior). diff --git a/manage-data/ingest/transform-enrich/set-up-an-enrich-processor.md b/manage-data/ingest/transform-enrich/set-up-an-enrich-processor.md index 45cf9b7d77..c230b29c7c 100644 --- a/manage-data/ingest/transform-enrich/set-up-an-enrich-processor.md +++ b/manage-data/ingest/transform-enrich/set-up-an-enrich-processor.md @@ -17,7 +17,7 @@ To set up an enrich processor, follow these steps: Once you have an enrich processor set up, you can [update your enrich data](#update-enrich-data) and [update your enrich policies](#update-enrich-policies). ::::{important} -The enrich processor performs several operations and may impact the speed of your ingest pipeline. We recommend [node roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) co-locating ingest and data roles to minimize remote search operations. +The enrich processor performs several operations and may impact the speed of your ingest pipeline. We recommend co-locating ingest and data [node roles](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md) to minimize remote search operations. We strongly recommend testing and benchmarking your enrich processors before deploying them in production.
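A lightweight way to follow that testing advice is the simulate pipeline API, which runs sample documents through a pipeline without indexing anything. This sketch assumes an enrich pipeline named `user_lookup` (a hypothetical name used earlier in this patch) and an illustrative email value:

```console
POST /_ingest/pipeline/user_lookup/_simulate
{
  "docs": [
    {
      "_source": {
        "email": "jane.doe@example.com"
      }
    }
  ]
}
```

The response shows each document as the pipeline would leave it, so you can confirm the enrich processor appends the expected fields before any production data flows through.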
@@ -40,7 +40,7 @@ To begin, add documents to one or more source indices. These documents should co You can manage source indices just like regular {{es}} indices using the [document](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-document) and [index](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-indices) APIs. -You also can set up [{{beats}}](https://www.elastic.co/guide/en/beats/libbeat/current/getting-started.html), such as a [{{filebeat}}](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html), to automatically send and index documents to your source indices. See [Getting started with {{beats}}](https://www.elastic.co/guide/en/beats/libbeat/current/getting-started.html). +You also can set up [{{beats}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/index.md), such as a [{{filebeat}}](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md), to automatically send and index documents to your source indices. See [Getting started with {{beats}}](https://www.elastic.co/guide/en/beats/libbeat/current/getting-started.html). ## Create an enrich policy [create-enrich-policy] @@ -65,7 +65,7 @@ Once the enrich policy is created, you need to execute it using the [execute enr The *enrich index* contains documents from the policy’s source indices. Enrich indices always begin with `.enrich-*`, are read-only, and are [force merged](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge). ::::{warning} -Enrich indices should only be used by the [enrich processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-processor.html) or the [{{esql}} `ENRICH` command](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-enrich). Avoid using enrich indices for other purposes. +Enrich indices should only be used by the [enrich processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/enrich-processor.md) or the [{{esql}} `ENRICH` command](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/esql-commands.md#esql-enrich). Avoid using enrich indices for other purposes. :::: @@ -91,7 +91,7 @@ You also can use the `max_matches` option to set the number of enrich documents See [Enrich](https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-processor.html) for a full list of configuration options. -You also can add other [processors](https://www.elastic.co/guide/en/elasticsearch/reference/current/processors.html) to your ingest pipeline. +You also can add other [processors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/index.md) to your ingest pipeline. ## Ingest and enrich documents [ingest-enrich-docs] diff --git a/manage-data/lifecycle/curator.md b/manage-data/lifecycle/curator.md index 6cac09cf7f..05630ce6c9 100644 --- a/manage-data/lifecycle/curator.md +++ b/manage-data/lifecycle/curator.md @@ -6,4 +6,4 @@ navigation_title: Curator Similar to {{ilm-cap}} ({{ilm-init}}), Elasticsearch Curator can help you manage index lifecycles. **If {{ilm-init}} provides the functionality to manage your index lifecycle and you have at least a Basic license, use {{ilm-init}} instead of Curator.** Many {{stack}} components use {{ilm-init}} by default. 
-If you're looking for additional functionality for managing your index lifecycle, you can read more about how Elasticsearch Curator may help in [Curator index management](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html). +If you're looking for additional functionality for managing your index lifecycle, you can read more about how Elasticsearch Curator may help in [Curator index management](asciidocalypse://docs/curator/docs/reference/elasticsearch/elasticsearch-client-curator/index.md). diff --git a/manage-data/lifecycle/data-stream.md b/manage-data/lifecycle/data-stream.md index 4d2854d4a7..22ce6565b2 100644 --- a/manage-data/lifecycle/data-stream.md +++ b/manage-data/lifecycle/data-stream.md @@ -20,7 +20,7 @@ A data stream lifecycle also supports downsampling the data stream backing indic ## How does it work? [data-streams-lifecycle-how-it-works] -In intervals configured by [`data_streams.lifecycle.poll_interval`](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-stream-lifecycle-settings.html#data-streams-lifecycle-poll-interval), {{es}} goes over each data stream and performs the following steps: +In intervals configured by [`data_streams.lifecycle.poll_interval`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/data-stream-lifecycle-settings.md#data-streams-lifecycle-poll-interval), {{es}} goes over each data stream and performs the following steps: 1. Checks if the data stream has a data stream lifecycle configured, skipping any indices not part of a managed data stream. 2. Rolls over the write index of the data stream, if it fulfills the conditions defined by [`cluster.lifecycle.default.rollover`](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-stream-lifecycle-settings.html#cluster-lifecycle-default-rollover). diff --git a/manage-data/lifecycle/data-stream/tutorial-data-stream-retention.md b/manage-data/lifecycle/data-stream/tutorial-data-stream-retention.md index 241e4e456b..d89c99649c 100644 --- a/manage-data/lifecycle/data-stream/tutorial-data-stream-retention.md +++ b/manage-data/lifecycle/data-stream/tutorial-data-stream-retention.md @@ -50,7 +50,7 @@ Retention does not define the period that the data will be removed, but the mini We define 4 different types of retention: * The data stream retention, or `data_retention`, which is the retention configured on the data stream level. It can be set via an [index template](../../data-store/templates.md) for future data streams or via the [PUT data stream lifecycle API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle) for an existing data stream. When the data stream retention is not set, it implies that the data need to be kept forever. -* The global default retention, let’s call it `default_retention`, which is a retention configured via the cluster setting [`data_streams.lifecycle.retention.default`](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-stream-lifecycle-settings.html#data-streams-lifecycle-retention-default) and will be applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively, it ensures that there will be no data streams keeping their data forever. This can be set via the [update cluster settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). 
+* The global default retention, let’s call it `default_retention`, which is a retention configured via the cluster setting [`data_streams.lifecycle.retention.default`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/data-stream-lifecycle-settings.md#data-streams-lifecycle-retention-default) and will be applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively, it ensures that there will be no data streams keeping their data forever. This can be set via the [update cluster settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). * The global max retention, let’s call it `max_retention`, which is a retention configured via the cluster setting [`data_streams.lifecycle.retention.max`](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-stream-lifecycle-settings.html#data-streams-lifecycle-retention-max) and will be applied to all data streams managed by data stream lifecycle. Effectively, it ensures that there will be no data streams whose retention will exceed this time period. This can be set via the [update cluster settings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). * The effective retention, or `effective_retention`, which is the retention applied at a data stream on a given moment. Effective retention cannot be set, it is derived by taking into account all the configured retention listed above and is calculated as it is described [here](#effective-retention-calculation). diff --git a/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md b/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md index f68181baa1..bafd850070 100644 --- a/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md +++ b/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md @@ -12,7 +12,7 @@ In this tutorial we’ll look at migrating an existing data stream from [Index L To migrate a data stream from {{ilm-init}} to data stream lifecycle we’ll have to execute two steps: -1. Update the index template that’s backing the data stream to set [prefer_ilm](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-stream-lifecycle-settings.html#index-lifecycle-prefer-ilm) to `false`, and to configure data stream lifecycle. +1. Update the index template that’s backing the data stream to set [prefer_ilm](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/data-stream-lifecycle-settings.md#index-lifecycle-prefer-ilm) to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the *existing* data stream using the [lifecycle API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle). For more details see the [migrate to data stream lifecycle](#migrate-from-ilm-to-dsl) section. 
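To make the retention levels described above concrete, here is a hedged sketch: `data_retention` is set on the data stream itself through the same lifecycle API referenced in the migration steps above, while the global default and max retention are cluster settings. The stream name and durations are examples only:

```console
# Per-stream retention (data_retention); the stream name is hypothetical
PUT _data_stream/my-data-stream/_lifecycle
{
  "data_retention": "7d"
}

# Global default and max retention, applied by the data stream lifecycle
# to managed data streams as described above
PUT _cluster/settings
{
  "persistent": {
    "data_streams.lifecycle.retention.default": "90d",
    "data_streams.lifecycle.retention.max": "180d"
  }
}
```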
diff --git a/manage-data/lifecycle/data-tiers.md b/manage-data/lifecycle/data-tiers.md index 0bec5e43b1..fac7b8e287 100644 --- a/manage-data/lifecycle/data-tiers.md +++ b/manage-data/lifecycle/data-tiers.md @@ -7,7 +7,7 @@ mapped_urls: # Data tiers -A *data tier* is a collection of [nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) within a cluster that share the same [data node role](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles), and a hardware profile that’s appropriately sized for the role. Elastic recommends that nodes in the same tier share the same hardware profile to avoid [hot spotting](/troubleshoot/elasticsearch/hotspotting.md). +A *data tier* is a collection of [nodes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md) within a cluster that share the same [data node role](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles), and a hardware profile that’s appropriately sized for the role. Elastic recommends that nodes in the same tier share the same hardware profile to avoid [hot spotting](/troubleshoot/elasticsearch/hotspotting.md). ## Available data tiers [available-tier] @@ -21,11 +21,11 @@ The data tiers that you use, and the way that you use them, depends on the data * [Hot tier](/manage-data/lifecycle/data-tiers.md#hot-tier) nodes handle the indexing load for time series data, such as logs or metrics. They hold your most recent, most-frequently-accessed data. * [Warm tier](/manage-data/lifecycle/data-tiers.md#warm-tier) nodes hold time series data that is accessed less-frequently and rarely needs to be updated. -* [Cold tier](/manage-data/lifecycle/data-tiers.md#cold-tier) nodes hold time series data that is accessed infrequently and not normally updated. To save space, you can keep [fully mounted indices](/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md#fully-mounted) of [{{search-snaps}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-searchable-snapshot.html) on the cold tier. These fully mounted indices eliminate the need for replicas, reducing required disk space by approximately 50% compared to the regular indices. +* [Cold tier](/manage-data/lifecycle/data-tiers.md#cold-tier) nodes hold time series data that is accessed infrequently and not normally updated. To save space, you can keep [fully mounted indices](/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md#fully-mounted) of [{{search-snaps}}](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-searchable-snapshot.md) on the cold tier. These fully mounted indices eliminate the need for replicas, reducing required disk space by approximately 50% compared to the regular indices. * [Frozen tier](/manage-data/lifecycle/data-tiers.md#frozen-tier) nodes hold time series data that is accessed rarely and never updated. The frozen tier stores [partially mounted indices](/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md#partially-mounted) of [{{search-snaps}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-searchable-snapshot.html) exclusively. This extends the storage capacity even further — by up to 20 times compared to the warm tier. ::::{tip} -The performance of an {{es}} node is often limited by the performance of the underlying storage and hardware profile. 
For example hardware profiles, refer to Elastic Cloud’s [instance configurations](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html). Review our recommendations for optimizing your storage for [indexing](/deploy-manage/production-guidance/optimize-performance/indexing-speed.md#indexing-use-faster-hardware) and [search](/deploy-manage/production-guidance/optimize-performance/search-speed.md#search-use-faster-hardware). +The performance of an {{es}} node is often limited by the performance of the underlying storage and hardware profile. For example hardware profiles, refer to Elastic Cloud’s [instance configurations](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md). Review our recommendations for optimizing your storage for [indexing](/deploy-manage/production-guidance/optimize-performance/indexing-speed.md#indexing-use-faster-hardware) and [search](/deploy-manage/production-guidance/optimize-performance/search-speed.md#search-use-faster-hardware). :::: ::::{important} @@ -306,7 +306,7 @@ To make sure that all data can be migrated from the data tier you want to disabl ##### Searchable snapshot data tier [ece-disable-searchable-snapshot-data-tier] -When data reaches the `cold` or `frozen` phases, it is automatically converted to a [searchable snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots.html) by ILM. If you do not intend to delete this data, you should manually restore each of the searchable snapshot indices to a regular index before disabling the data tier, by following these steps: +When data reaches the `cold` or `frozen` phases, it is automatically converted to a [searchable snapshot](/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md) by ILM. If you do not intend to delete this data, you should manually restore each of the searchable snapshot indices to a regular index before disabling the data tier, by following these steps: 1. Stop ILM and check ILM status is `STOPPED` to prevent data from migrating to the phase you intend to disable while you are working through the next steps. @@ -363,7 +363,7 @@ When data reaches the `cold` or `frozen` phases, it is automatically converted t 5. Restore indices from the searchable snapshots. 1. Follow the steps to [specify the data tier based allocation inclusion rules](/manage-data/lifecycle/data-tiers.md#update-data-tier-allocation-rules). - 2. Remove the associated ILM policy (set it to `null`). If you want to apply a different ILM policy, follow the steps to [Switch lifecycle policies](https://www.elastic.co/guide/en/elasticsearch/reference/current/set-up-lifecycle-policy.html#switch-lifecycle-policies). + 2. Remove the associated ILM policy (set it to `null`). If you want to apply a different ILM policy, follow the steps to [Switch lifecycle policies](/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md#switch-lifecycle-policies). 3. If needed, specify the alias for rollover, otherwise set it to `null`. 4. Optionally, specify the desired number of replica shards. @@ -433,7 +433,7 @@ We recommend you use [dedicated nodes](/deploy-manage/distributed-architecture/c ## Data tier index allocation [data-tier-allocation] -The [`index.routing.allocation.include._tier_preference`](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tier-shard-filtering.html#tier-preference-allocation-filter) setting determines which tier the index should be allocated to. 
+The [`index.routing.allocation.include._tier_preference`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/data-tier-allocation-settings.md#tier-preference-allocation-filter) setting determines which tier the index should be allocated to.

When you create an index, by default {{es}} sets the `_tier_preference` to `data_content` to automatically allocate the index shards to the content tier.

@@ -448,7 +448,7 @@ You can override this setting after index creation by [updating the index settin

This setting also accepts multiple tiers in order of preference. This prevents indices from remaining unallocated if no nodes are available in the preferred tier. For example, when {{ilm}} migrates an index to the cold phase, it sets the index `_tier_preference` to `data_cold,data_warm,data_hot`.

-To remove the data tier preference setting, set the `_tier_preference` value to `null`. This allows the index to allocate to any data node within the cluster. Setting the `_tier_preference` to `null` does not restore the default value. Note that, in the case of managed indices, a [migrate](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate.html) action might apply a new value in its place.
+To remove the data tier preference setting, set the `_tier_preference` value to `null`. This allows the index to allocate to any data node within the cluster. Setting the `_tier_preference` to `null` does not restore the default value. Note that, in the case of managed indices, a [migrate](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-migrate.md) action might apply a new value in its place.

### Determine the current data tier preference [data-tier-allocation-value]
@@ -467,4 +467,4 @@ This setting will not unallocate a currently allocated shard, but might prevent

### Automatic data tier migration [data-tier-migration]

-{{ilm-init}} automatically transitions managed indices through the available data tiers using the [migrate](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate.html) action. By default, this action is automatically injected in every phase. You can explicitly specify the migrate action with `"enabled": false` to [disable automatic migration](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate.html#ilm-disable-migrate-ex), for example, if you’re using the [allocate action](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-allocate.html) to manually specify allocation rules.
+{{ilm-init}} automatically transitions managed indices through the available data tiers using the [migrate](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-migrate.md) action. By default, this action is automatically injected in every phase. You can explicitly specify the migrate action with `"enabled": false` to [disable automatic migration](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-migrate.md#ilm-disable-migrate-ex), for example, if you’re using the [allocate action](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-allocate.md) to manually specify allocation rules.
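For illustration, inspecting and overriding the tier preference described above could look like the following; `my-index-000001` is a placeholder index name:

```console
# Read the current tier preference for an index
GET /my-index-000001/_settings?filter_path=*.settings.index.routing.allocation.include._tier_preference

# Override it, listing tiers in order of preference
PUT /my-index-000001/_settings
{
  "index.routing.allocation.include._tier_preference": "data_warm,data_hot"
}
```

Setting the value to `null` instead removes the preference entirely, as described above.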
diff --git a/manage-data/lifecycle/index-lifecycle-management.md b/manage-data/lifecycle/index-lifecycle-management.md index 9840bad157..af8cf6f4fb 100644 --- a/manage-data/lifecycle/index-lifecycle-management.md +++ b/manage-data/lifecycle/index-lifecycle-management.md @@ -37,7 +37,7 @@ To use {{ilm-init}}, all nodes in a cluster must run the same version. Although * **Shrink**: Reduces the number of primary shards in an index. * **Force merge**: Triggers a [force merge](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html) to reduce the number of segments in an index’s shards. * **Delete**: Permanently remove an index, including all of its data and metadata. -* [And more](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-actions.html) +* [And more](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/index.md) Each action has options you can use to specify index behavior and characteristics like: diff --git a/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md b/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md index 53ec02ee16..c37ac213dd 100644 --- a/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md +++ b/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md @@ -202,7 +202,7 @@ To switch an index’s lifecycle policy, follow these steps: 2. The remove policy API removes all {{ilm-init}} metadata from the index and doesn’t consider the index’s lifecycle status. This can leave indices in an undesired state. - For example, the [`forcemerge`](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-forcemerge.html) action temporarily closes an index before reopening it. Removing an index’s {{ilm-init}} policy during a `forcemerge` can leave the index closed indefinitely. + For example, the [`forcemerge`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-forcemerge.md) action temporarily closes an index before reopening it. Removing an index’s {{ilm-init}} policy during a `forcemerge` can leave the index closed indefinitely. After policy removal, use the [get index API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get) to check an index’s state . Target a data stream or alias to get the state of all its indices. diff --git a/manage-data/lifecycle/index-lifecycle-management/index-lifecycle.md b/manage-data/lifecycle/index-lifecycle-management/index-lifecycle.md index b9d21d94ce..d0b1bac13e 100644 --- a/manage-data/lifecycle/index-lifecycle-management/index-lifecycle.md +++ b/manage-data/lifecycle/index-lifecycle-management/index-lifecycle.md @@ -31,7 +31,7 @@ If you use {{es}}'s security features, {{ilm-init}} performs operations as the u The minimum age defaults to zero, which causes {{ilm-init}} to move indices to the next phase as soon as all actions in the current phase complete. ::::{note} -If an index has been [rolled over](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-rollover.html), then the `min_age` value is relative to the time the index was rolled over, not the index creation time. [Learn more](../../../troubleshoot/elasticsearch/index-lifecycle-management-errors.md#min-age-calculation). 
+If an index has been [rolled over](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-rollover.md), then the `min_age` value is relative to the time the index was rolled over, not the index creation time. [Learn more](../../../troubleshoot/elasticsearch/index-lifecycle-management-errors.md#min-age-calculation). :::: @@ -56,14 +56,14 @@ When an index enters a phase, {{ilm-init}} caches the phase definition in the in * Hot - * [Set Priority](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-set-priority.html) - * [Unfollow](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-unfollow.html) + * [Set Priority](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-set-priority.md) + * [Unfollow](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-unfollow.md) * [Rollover](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-rollover.html) - * [Read-Only](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-readonly.html) - * [Downsample](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html) - * [Shrink](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-shrink.html) - * [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-forcemerge.html) - * [Searchable Snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-searchable-snapshot.html) + * [Read-Only](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-readonly.md) + * [Downsample](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md) + * [Shrink](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-shrink.md) + * [Force Merge](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-forcemerge.md) + * [Searchable Snapshot](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-searchable-snapshot.md) * Warm @@ -71,8 +71,8 @@ When an index enters a phase, {{ilm-init}} caches the phase definition in the in * [Unfollow](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-unfollow.html) * [Read-Only](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-readonly.html) * [Downsample](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-downsample.html) - * [Allocate](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-allocate.html) - * [Migrate](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate.html) + * [Allocate](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-allocate.md) + * [Migrate](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-migrate.md) * [Shrink](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-shrink.html) * [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-forcemerge.html) @@ -93,5 +93,5 @@ When an index enters a phase, {{ilm-init}} caches the phase definition in the in * Delete - * [Wait For Snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-wait-for-snapshot.html) - * [Delete](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete.html) + * [Wait For 
Snapshot](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-wait-for-snapshot.md) + * [Delete](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-delete.md) diff --git a/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md b/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md index 02a4da5dd4..191fb191dc 100644 --- a/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md +++ b/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md @@ -32,7 +32,7 @@ Investigate your indices and perform operations from the **Indices** view. * To show details and perform operations such as close, forcemerge, and flush, click the index name. To perform operations on multiple indices, select their checkboxes and then open the **Manage** menu. For more information on managing indices, refer to [Index APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-indices). * To filter the list of indices, use the search bar or click a badge. Badges indicate if an index is a [follower index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow), a [rollup index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps), or [frozen](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-unfreeze). -* To drill down into the index [mappings](../../data-store/mapping.md), [settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-modules-settings), and statistics, click an index name. From this view, you can navigate to **Discover** to further explore the documents in the index. +* To drill down into the index [mappings](../../data-store/mapping.md), [settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-modules-settings), and statistics, click an index name. From this view, you can navigate to **Discover** to further explore the documents in the index. :::{image} ../../../images/elasticsearch-reference-management_index_details.png :alt: Index Management UI @@ -99,7 +99,7 @@ In this tutorial, you’ll create an index template and use it to configure two ::: 2. Define index settings. These are optional. For this tutorial, leave this section blank. -3. Define a mapping that contains an [object](https://www.elastic.co/guide/en/elasticsearch/reference/current/object.html) field named `geo` with a child [`geo_point`](https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-point.html) field named `coordinates`: +3. 
Define a mapping that contains an [object](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/object.md) field named `geo` with a child [`geo_point`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/geo-point.md) field named `coordinates`: :::{image} ../../../images/elasticsearch-reference-management-index-templates-mappings.png :alt: Mapped fields page @@ -189,7 +189,7 @@ Use the **Enrich Policies** view to add data from your existing indices to incom * The source indices that store enrich data as documents * The fields from the source indices used to match incoming documents * The enrich fields containing enrich data from the source indices that you want to add to incoming documents -* An optional [query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-all-query.html). +* An optional [query](asciidocalypse://docs/elasticsearch/docs/reference/query-languages/query-dsl-match-all-query.md). :::{image} ../../../images/elasticsearch-reference-management-enrich-policies.png :alt: Enrich policies diff --git a/manage-data/lifecycle/index-lifecycle-management/manage-existing-indices.md b/manage-data/lifecycle/index-lifecycle-management/manage-existing-indices.md index 726eef5764..4773fa73c4 100644 --- a/manage-data/lifecycle/index-lifecycle-management/manage-existing-indices.md +++ b/manage-data/lifecycle/index-lifecycle-management/manage-existing-indices.md @@ -24,7 +24,7 @@ Define a separate policy for your older indices that omits the rollover action. Keep in mind that policies applied to existing indices compare the `min_age` for each phase to the original creation date of the index, and might proceed through multiple phases immediately. If your policy performs resource-intensive operations like force merge, you don’t want to have a lot of indices performing those operations all at once when you switch over to {{ilm-init}}. -You can specify different `min_age` values in the policy you use for existing indices, or set [`index.lifecycle.origination_date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-settings.html#index-lifecycle-origination-date) to control how the index age is calculated. +You can specify different `min_age` values in the policy you use for existing indices, or set [`index.lifecycle.origination_date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/index-lifecycle-management-settings.md#index-lifecycle-origination-date) to control how the index age is calculated. Once all pre-{{ilm-init}} indices have been aged out and removed, you can delete the policy you used to manage them. 
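As a minimal sketch of the `index.lifecycle.origination_date` approach mentioned above, assuming the setting takes a Unix epoch value in milliseconds and using a placeholder index name:

```console
# Treat this pre-existing index as if it originated on 2023-06-01
# (1685577600000 ms since the epoch); the index name is hypothetical
PUT /my-old-index-2023.06.01/_settings
{
  "index.lifecycle.origination_date": 1685577600000
}
```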
diff --git a/manage-data/lifecycle/index-lifecycle-management/migrate-index-allocation-filters-to-node-roles.md b/manage-data/lifecycle/index-lifecycle-management/migrate-index-allocation-filters-to-node-roles.md index bfc2cc3114..7804c32f8c 100644 --- a/manage-data/lifecycle/index-lifecycle-management/migrate-index-allocation-filters-to-node-roles.md +++ b/manage-data/lifecycle/index-lifecycle-management/migrate-index-allocation-filters-to-node-roles.md @@ -5,7 +5,7 @@ mapped_pages: # Migrate index allocation filters to node roles [migrate-index-allocation-filters] -If you currently use [custom node attributes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#custom-node-attributes) and [attribute-based allocation filters](../../../deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/index-level-shard-allocation.md) to move indices through [data tiers](../data-tiers.md) in a [hot-warm-cold architecture](https://www.elastic.co/blog/implementing-hot-warm-cold-in-elasticsearch-with-index-lifecycle-management), we recommend that you switch to using the built-in node roles and automatic [data tier allocation](../data-tiers.md#data-tier-allocation). Using node roles enables {{ilm-init}} to automatically move indices between data tiers. +If you currently use [custom node attributes](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#custom-node-attributes) and [attribute-based allocation filters](../../../deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/index-level-shard-allocation.md) to move indices through [data tiers](../data-tiers.md) in a [hot-warm-cold architecture](https://www.elastic.co/blog/implementing-hot-warm-cold-in-elasticsearch-with-index-lifecycle-management), we recommend that you switch to using the built-in node roles and automatic [data tier allocation](../data-tiers.md#data-tier-allocation). Using node roles enables {{ilm-init}} to automatically move indices between data tiers. ::::{note} While we recommend relying on automatic data tier allocation to manage your data in a hot-warm-cold architecture, you can still use attribute-based allocation filters to control shard allocation for other purposes. @@ -68,7 +68,7 @@ node.roles [ data_hot, data_content ] ### Remove custom allocation settings from existing {{ilm-init}} policies [remove-custom-allocation-settings] -Update the allocate action for each lifecycle phase to remove the attribute-based allocation settings. {{ilm-init}} will inject a [migrate](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate.html) action into each phase to automatically transition the indices through the data tiers. +Update the allocate action for each lifecycle phase to remove the attribute-based allocation settings. {{ilm-init}} will inject a [migrate](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/ilm-migrate.md) action into each phase to automatically transition the indices through the data tiers. If the allocate action does not set the number of replicas, remove the allocate action entirely. (An empty allocate action is invalid.) @@ -100,7 +100,7 @@ To completely avoid the issues that raise when mixing the tier preference and cu To enable {{ilm-init}} to move an *existing* managed index through the data tiers, update the index settings to: 1. Remove the custom allocation filter by setting it to `null`. -2. 
Set the [tier preference](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tier-shard-filtering.html#tier-preference-allocation-filter). +2. Set the [tier preference](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/data-tier-allocation-settings.md#tier-preference-allocation-filter). For example, if your old template set the `data` attribute to `hot` to allocate shards to the hot tier, set the `data` attribute to `null` and set the `_tier_preference` to `data_hot`. diff --git a/manage-data/lifecycle/index-lifecycle-management/rollover.md b/manage-data/lifecycle/index-lifecycle-management/rollover.md index 6f1d433c50..f34fcd0e7a 100644 --- a/manage-data/lifecycle/index-lifecycle-management/rollover.md +++ b/manage-data/lifecycle/index-lifecycle-management/rollover.md @@ -17,7 +17,7 @@ We recommend using [data streams](https://www.elastic.co/docs/api/doc/elasticsea Each data stream requires an [index template](../../data-store/templates.md) that contains: * A name or wildcard (`*`) pattern for the data stream. -* The data stream’s timestamp field. This field must be mapped as a [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) or [`date_nanos`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html) field data type and must be included in every document indexed to the data stream. +* The data stream’s timestamp field. This field must be mapped as a [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) or [`date_nanos`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date_nanos.md) field data type and must be included in every document indexed to the data stream. * The mappings and settings applied to each backing index when it’s created. Data streams are designed for append-only data, where the data stream name can be used as the operations (read, write, rollover, shrink etc.) target. If your use case requires data to be updated in place, you can instead manage your time series data using [index aliases](../../data-store/aliases.md). However, there are a few more configuration steps and concepts: diff --git a/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md b/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md index 4639787fd0..032d78870f 100644 --- a/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md +++ b/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md @@ -15,7 +15,7 @@ When you continuously index timestamped documents into {{es}}, you typically use To automate rollover and management of a data stream with {{ilm-init}}, you: -1. [Create a lifecycle policy](/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md#ilm-gs-create-policy) that defines the appropriate [phases](index-lifecycle.md) and [actions](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-actions.html). +1. [Create a lifecycle policy](/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md#ilm-gs-create-policy) that defines the appropriate [phases](index-lifecycle.md) and [actions](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-lifecycle-actions/index.md). 2. 
[Create an index template](/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md#ilm-gs-apply-policy) to [create the data stream](/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md#ilm-gs-create-the-data-stream) and apply the ILM policy and the indices settings and mappings configurations for the backing indices. 3. [Verify indices are moving through the lifecycle phases](/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md#ilm-gs-check-progress) as expected. diff --git a/manage-data/lifecycle/rollup/getting-started-kibana.md b/manage-data/lifecycle/rollup/getting-started-kibana.md index ef78a43e24..d8a19b3951 100644 --- a/manage-data/lifecycle/rollup/getting-started-kibana.md +++ b/manage-data/lifecycle/rollup/getting-started-kibana.md @@ -52,7 +52,7 @@ You can’t change a rollup job after you’ve created it. To select additional ## Try it: Create and visualize rolled up data [rollup-data-tutorial] -This example creates a rollup job to capture log data from sample web logs. Before you start, [add the web logs sample data set](https://www.elastic.co/guide/en/kibana/current/get-started.html). +This example creates a rollup job to capture log data from sample web logs. Before you start, [add the web logs sample data set](/explore-analyze/index.md). In this example, you want data that is older than 7 days in the `kibana_sample_data_logs` index to roll up into the `rollup_logstash` index. You’ll bucket the rolled up data on an hourly basis, using `60m` for the time bucket configuration. diff --git a/manage-data/lifecycle/rollup/understanding-groups.md b/manage-data/lifecycle/rollup/understanding-groups.md index 1b0ee94a18..eae9e102f3 100644 --- a/manage-data/lifecycle/rollup/understanding-groups.md +++ b/manage-data/lifecycle/rollup/understanding-groups.md @@ -108,7 +108,7 @@ Ultimately, when configuring `groups` for a job, think in terms of how you might ## Calendar vs fixed time intervals [rollup-understanding-group-intervals] -Each rollup-job must have a date histogram group with a defined interval. {{es}} understands both [calendar and fixed time intervals](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html#calendar_and_fixed_intervals). Fixed time intervals are fairly easy to understand; `60s` means sixty seconds. But what does `1M` mean? One month of time depends on which month we are talking about, some months are longer or shorter than others. This is an example of calendar time and the duration of that unit depends on context. Calendar units are also affected by leap-seconds, leap-years, etc. +Each rollup-job must have a date histogram group with a defined interval. {{es}} understands both [calendar and fixed time intervals](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-bucket-datehistogram-aggregation.md#calendar_and_fixed_intervals). Fixed time intervals are fairly easy to understand; `60s` means sixty seconds. But what does `1M` mean? One month of time depends on which month we are talking about, some months are longer or shorter than others. This is an example of calendar time and the duration of that unit depends on context. Calendar units are also affected by leap-seconds, leap-years, etc. This is important because the buckets generated by rollup are in either calendar or fixed intervals and this limits how you can query them later. 
See [Requests must be multiples of the config](rollup-search-limitations.md#rollup-search-limitations-intervals).
diff --git a/manage-data/migrate.md b/manage-data/migrate.md
index fb6fffbb3c..d2b2322b5a 100644
--- a/manage-data/migrate.md
+++ b/manage-data/migrate.md
@@ -30,7 +30,7 @@ Reindex from a remote cluster

For {{ech}}, if your cluster is self-managed with a self-signed certificate, you can follow this [step-by-step migration guide](migrate/migrate-from-a-self-managed-cluster-with-a-self-signed-certificate-using-remote-reindex.md).

Restore from a snapshot
-: The new cluster must be the same size as your old one, or larger, to accommodate the data. The new cluster must also be an Elasticsearch version that is compatible with the old cluster (check [Elasticsearch snapshot version compatibility](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html#snapshot-restore-version-compatibility) for details). If you have not already done so, you will need to [set up snapshots for your old cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html) using a repository that can be accessed from the new cluster.
+: The new cluster must be the same size as your old one, or larger, to accommodate the data. The new cluster must also be an Elasticsearch version that is compatible with the old cluster (check [Elasticsearch snapshot version compatibility](/deploy-manage/tools/snapshot-and-restore.md#snapshot-restore-version-compatibility) for details). If you have not already done so, you will need to [set up snapshots for your old cluster](/deploy-manage/tools/snapshot-and-restore/self-managed.md) using a repository that can be accessed from the new cluster.

Migrating internal {{es}} indices
: For {{ech}} and Elasticsearch Add-On for Heroku, if you are migrating internal {{es}} indices from another cluster, specifically the `.kibana` index or the `.security` index, there are two options:

@@ -39,7 +39,7 @@ Migrating internal {{es}} indices
* Check [Migrating internal indices](migrate/migrate-internal-indices.md) to restore the internal {{es}} indices from a snapshot.

::::{warning}
-Before you migrate your {{es}} data, [define your index mappings](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html) on the new cluster. Index mappings are unable to migrate during reindex operations.
+Before you migrate your {{es}} data, [define your index mappings](/manage-data/data-store/mapping.md) on the new cluster. Index mappings cannot be migrated during reindex operations.
::::

### Index from the source [ec-index-source]
diff --git a/manage-data/use-case-use-elasticsearch-to-manage-time-series-data.md b/manage-data/use-case-use-elasticsearch-to-manage-time-series-data.md
index 4479095eaf..fbf019506c 100644
--- a/manage-data/use-case-use-elasticsearch-to-manage-time-series-data.md
+++ b/manage-data/use-case-use-elasticsearch-to-manage-time-series-data.md
@@ -34,7 +34,7 @@ The steps for setting up data tiers vary based on your deployment type:
::::::

::::::{tab-item} Self-managed
-To assign a node to a data tier, add the respective [node role](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles) to the node’s `elasticsearch.yml` file. Changing an existing node’s roles requires a [rolling restart](../deploy-manage/maintenance/start-stop-services/full-cluster-restart-rolling-restart-procedures.md#restart-cluster-rolling).
+To assign a node to a data tier, add the respective [node role](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md#node-roles) to the node’s `elasticsearch.yml` file. Changing an existing node’s roles requires a [rolling restart](../deploy-manage/maintenance/start-stop-services/full-cluster-restart-rolling-restart-procedures.md#restart-cluster-rolling). ```yaml # Content tier @@ -94,7 +94,7 @@ Use any of the following repository types with searchable snapshots: * [AWS S3](../deploy-manage/tools/snapshot-and-restore/s3-repository.md) * [Google Cloud Storage](../deploy-manage/tools/snapshot-and-restore/google-cloud-storage-repository.md) * [Azure Blob Storage](../deploy-manage/tools/snapshot-and-restore/azure-repository.md) -* [Hadoop Distributed File Store (HDFS)](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-hdfs.html) +* [Hadoop Distributed File Store (HDFS)](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch-plugins/repository-hdfs.md) * [Shared filesystems](../deploy-manage/tools/snapshot-and-restore/shared-file-system-repository.md) such as NFS * [Read-only HTTP and HTTPS repositories](../deploy-manage/tools/snapshot-and-restore/read-only-url-repository.md) @@ -249,13 +249,13 @@ If you use a custom application, you need to set up your own data stream. A data When creating your component templates, include: -* A [`date`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html) or [`date_nanos`](https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html) mapping for the `@timestamp` field. If you don’t specify a mapping, {{es}} maps `@timestamp` as a `date` field with default options. +* A [`date`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date.md) or [`date_nanos`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/date_nanos.md) mapping for the `@timestamp` field. If you don’t specify a mapping, {{es}} maps `@timestamp` as a `date` field with default options. * Your lifecycle policy in the `index.lifecycle.name` index setting. ::::{tip} Use the [Elastic Common Schema (ECS)](https://www.elastic.co/guide/en/ecs/current) when mapping your fields. ECS fields integrate with several {{stack}} features by default. -If you’re unsure how to map your fields, use [runtime fields](data-store/mapping/define-runtime-fields-in-search-request.md) to extract fields from [unstructured content](https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html#mapping-unstructured-content) at search time. For example, you can index a log message to a `wildcard` field and later extract IP addresses and other data from this field during a search. +If you’re unsure how to map your fields, use [runtime fields](data-store/mapping/define-runtime-fields-in-search-request.md) to extract fields from [unstructured content](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/keyword.md#mapping-unstructured-content) at search time. For example, you can index a log message to a `wildcard` field and later extract IP addresses and other data from this field during a search. :::: @@ -307,7 +307,7 @@ PUT _component_template/my-settings Use your component templates to create an index template. Specify: -* One or more index patterns that match the data stream’s name. 
We recommend using our [data stream naming scheme](https://www.elastic.co/guide/en/fleet/current/data-streams.html#data-streams-naming-scheme). +* One or more index patterns that match the data stream’s name. We recommend using our [data stream naming scheme](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/data-streams.md#data-streams-naming-scheme). * That the template is data stream enabled. * Any component templates that contain your mappings and index settings. * A priority higher than `200` to avoid collisions with built-in templates. See [Avoid index pattern collisions](data-store/templates.md#avoid-index-pattern-collisions). diff --git a/raw-migrated-files/apm-agent-android/apm-agent-android/release-notes.md b/raw-migrated-files/apm-agent-android/apm-agent-android/release-notes.md index 709315577a..c01780bcbe 100644 --- a/raw-migrated-files/apm-agent-android/apm-agent-android/release-notes.md +++ b/raw-migrated-files/apm-agent-android/apm-agent-android/release-notes.md @@ -5,6 +5,6 @@ This functionality is in technical preview and may be changed or removed in a fu :::: -* [Android agent version 0.x](https://www.elastic.co/guide/en/apm/agent/android/current/release-notes-0.x.html) +* [Android agent version 0.x](asciidocalypse://docs/apm-agent-android/docs/release-notes/apm-android-agent.md) diff --git a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-es-secure-settings.md b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-es-secure-settings.md index ef96af9a95..4ab7b16999 100644 --- a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-es-secure-settings.md +++ b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-es-secure-settings.md @@ -1,6 +1,6 @@ # Secure settings [k8s-es-secure-settings] -You can specify [secure settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html) with [Kubernetes secrets](https://kubernetes.io/docs/concepts/configuration/secret/). The secrets should contain a key-value pair for each secure setting you want to add. ECK automatically injects these settings into the keystore on each Elasticsearch node before it starts Elasticsearch. The ECK operator continues to watch the secrets for changes and will update the Elasticsearch keystore when it detects a change. +You can specify [secure settings](/deploy-manage/security/secure-settings.md) with [Kubernetes secrets](https://kubernetes.io/docs/concepts/configuration/secret/). The secrets should contain a key-value pair for each secure setting you want to add. ECK automatically injects these settings into the keystore on each Elasticsearch node before it starts Elasticsearch. The ECK operator continues to watch the secrets for changes and will update the Elasticsearch keystore when it detects a change. ## Basic usage [k8s_basic_usage] diff --git a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-orchestration.md b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-orchestration.md index 049fb3c251..fb66c81a65 100644 --- a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-orchestration.md +++ b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-orchestration.md @@ -120,7 +120,7 @@ Depending on how the NodeSets are updated, ECK handles the Kubernetes resource r * The specification of an existing NodeSet is updated. For example, the Elasticsearch configuration, or the PodTemplate resources requirements. - ECK performs a rolling upgrade of the corresponding Elasticsearch nodes. 
It follows the [Elasticsearch rolling upgrade best practices](https://www.elastic.co/guide/en/elastic-stack/current/upgrading-elasticsearch.html) to update the underlying Pods while maintaining the availability of the Elasticsearch cluster where possible. In most cases, the process simply involves restarting Elasticsearch nodes one-by-one. Note that some cluster topologies may be impossible to deploy without making the cluster unavailable (check [Limitations](../../../deploy-manage/upgrade/deployment-or-cluster.md#k8s-orchestration-limitations) ). + ECK performs a rolling upgrade of the corresponding Elasticsearch nodes. It follows the [Elasticsearch rolling upgrade best practices](/deploy-manage/upgrade/deployment-or-cluster.md) to update the underlying Pods while maintaining the availability of the Elasticsearch cluster where possible. In most cases, the process simply involves restarting Elasticsearch nodes one-by-one. Note that some cluster topologies may be impossible to deploy without making the cluster unavailable (check [Limitations](../../../deploy-manage/upgrade/deployment-or-cluster.md#k8s-orchestration-limitations) ). * An existing NodeSet is renamed. @@ -145,7 +145,7 @@ Due to relying on Kubernetes primitives such as StatefulSets, the ECK orchestrat * Clusters containing indices with no replicas -If an {{es}} node holds the only copy of a shard, this shard becomes unavailable while the node is upgraded. To ensure [high availability](https://www.elastic.co/guide/en/elasticsearch/reference/current/high-availability-cluster-design.html) it is recommended to configure clusters with three master nodes, more than one node per [data tier](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-tiers.html) and at least one replica per index. +If an {{es}} node holds the only copy of a shard, this shard becomes unavailable while the node is upgraded. To ensure [high availability](/deploy-manage/production-guidance/availability-and-resilience.md) it is recommended to configure clusters with three master nodes, more than one node per [data tier](/manage-data/lifecycle/data-tiers.md) and at least one replica per index. * Elasticsearch Pods may stay `Pending` during a rolling upgrade if the Kubernetes scheduler cannot re-schedule them back. This is especially important when using local PersistentVolumes. If the Kubernetes node bound to a local PersistentVolume does not have enough capacity to host an upgraded Pod which was temporarily removed, that Pod will stay `Pending`. * Rolling upgrades can only make progress if the Elasticsearch cluster health is green. There are exceptions to this rule if the cluster health is yellow and if the following conditions are satisfied: @@ -169,7 +169,7 @@ Advanced users may force an upgrade by manually deleting Pods themselves. The de Operations that reduce the number of nodes in the cluster cannot make progress without user intervention, if the Elasticsearch index replica settings are incompatible with the intended downscale. Specifically, if the Elasticsearch index settings demand a higher number of shard copies than data nodes in the cluster after the downscale operation, ECK cannot migrate the data away from the node about to be removed. You can address this in the following ways: * Adjust the Elasticsearch [index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) to a number of replicas that allow the desired node removal. 
-* Use [`auto_expand_replicas`](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-settings) to automatically adjust the replicas to the number of data nodes in the cluster. +* Use [`auto_expand_replicas`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#dynamic-index-settings) to automatically adjust the replicas to the number of data nodes in the cluster. ## Advanced control during rolling upgrades [k8s-advanced-upgrade-control] diff --git a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-saml-authentication.md b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-saml-authentication.md index e4cbae9f7e..30189e99a9 100644 --- a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-saml-authentication.md +++ b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-saml-authentication.md @@ -8,7 +8,7 @@ Elastic Stack SSO requires a valid Enterprise license or Enterprise trial licens ::::{tip} -Make sure you check the complete [Configuring SAML single sign-on on the Elastic Stack](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html) guide before setting up SAML SSO for Kibana and Elasticsearch deployments managed by ECK. +Make sure you check the complete [Configuring SAML single sign-on on the Elastic Stack](/deploy-manage/users-roles/cluster-or-deployment-auth/saml.md) guide before setting up SAML SSO for Kibana and Elasticsearch deployments managed by ECK. :::: @@ -100,7 +100,7 @@ To configure Elasticsearch for signing messages and/or for encrypted messages, k To enable SAML authentication in Kibana, you have to add SAML as an authentication provider and specify the SAML realm that you used in your Elasticsearch configuration. ::::{tip} -You can configure multiple authentication providers in Kibana and let users choose the provider they want to use. For more information, check [the Kibana authentication documentation](https://www.elastic.co/guide/en/kibana/current/kibana-authentication.html). +You can configure multiple authentication providers in Kibana and let users choose the provider they want to use. For more information, check [the Kibana authentication documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/user-authentication.md). :::: @@ -134,7 +134,7 @@ Your SAML users cannot login to Kibana until they are assigned roles. For more i The Elastic Stack supports generating service provider metadata, that can be imported to the identity provider, and configure many of the integration options between the identity provider and the service provider, automatically. For more information, check [the Generating SP metadata section](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-sp-metadata) in the Stack SAML guide. -To generate the Service Provider metadata using [the elasticsearch-saml-metadata command](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-metadata.html), you will have to run the command using `kubectl`, and then copy the generated metadata file to your local machine. For example: +To generate the Service Provider metadata using [the elasticsearch-saml-metadata command](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/saml-metadata.md), you will have to run the command using `kubectl`, and then copy the generated metadata file to your local machine. 
For example: ```sh # Create metadata diff --git a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-upgrading-stack.md b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-upgrading-stack.md index 85dd5ec34c..4be7c0a878 100644 --- a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-upgrading-stack.md +++ b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-upgrading-stack.md @@ -7,7 +7,7 @@ We have identified an issue with Elasticsearch 8.15.1 and 8.15.2 that prevents s The operator can safely perform upgrades to newer versions of the various Elastic Stack resources. -Follow the instructions in the [Elasticsearch documentation](https://www.elastic.co/guide/en/elastic-stack/current/upgrading-elastic-stack.html). Make sure that your cluster is compatible with the target version, take backups, and follow the specific upgrade instructions for each resource type. When you are ready, modify the `version` field in the resource spec to the desired stack version and the operator will start the upgrade process automatically. +Follow the instructions in the [Elasticsearch documentation](/deploy-manage/upgrade/deployment-or-cluster.md). Make sure that your cluster is compatible with the target version, take backups, and follow the specific upgrade instructions for each resource type. When you are ready, modify the `version` field in the resource spec to the desired stack version and the operator will start the upgrade process automatically. ECK will make sure that Elastic Stack resources are upgraded in the correct order. Upgrades to dependent stack resources are delayed until the dependency is upgraded. For example, the Kibana upgrade will be rolled out only when the associated Elasticsearch cluster has been upgraded. diff --git a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-users-and-roles.md b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-users-and-roles.md index c42f06ae36..f1240c9b2f 100644 --- a/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-users-and-roles.md +++ b/raw-migrated-files/cloud-on-k8s/cloud-on-k8s/k8s-users-and-roles.md @@ -43,12 +43,12 @@ Do not run the `elasticsearch-service-tokens` command inside an Elasticsearch Po ### Native realm [k8s_native_realm] -You can create custom users in the [Elasticsearch native realm](https://www.elastic.co/guide/en/elasticsearch/reference/current/native-realm.html) using [Elasticsearch user management APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-security). +You can create custom users in the [Elasticsearch native realm](/deploy-manage/users-roles/cluster-or-deployment-auth/native.md) using [Elasticsearch user management APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-security). ### File realm [k8s_file_realm] -Custom users can also be created by providing the desired [file realm content](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-realm.html) or a username and password in Kubernetes secrets, referenced in the Elasticsearch resource. +Custom users can also be created by providing the desired [file realm content](/deploy-manage/users-roles/cluster-or-deployment-auth/file-based.md) or a username and password in Kubernetes secrets, referenced in the Elasticsearch resource. ```yaml apiVersion: elasticsearch.k8s.elastic.co/v1 @@ -117,7 +117,7 @@ stringData: user:jacknich ``` -You can populate the content of both `users` and `users_roles` using the [elasticsearch-users](https://www.elastic.co/guide/en/elasticsearch/reference/current/users-command.html) tool. 
+You can populate the content of both `users` and `users_roles` using the [elasticsearch-users](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/command-line-tools/users-command.md) tool. For example, invoking the tool in a Docker container:

diff --git a/raw-migrated-files/cloud/cloud-enterprise/Elastic-Cloud-Enterprise-overview.md b/raw-migrated-files/cloud/cloud-enterprise/Elastic-Cloud-Enterprise-overview.md
index ee863ff5d7..c89107e577 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/Elastic-Cloud-Enterprise-overview.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/Elastic-Cloud-Enterprise-overview.md
@@ -27,4 +27,4 @@ ECE evolves from the Elastic hosted Cloud SaaS offering into a standalone produc
* Support for off-line installations.
* Automated restore and snapshot.

-Check the [glossary](https://www.elastic.co/guide/en/elastic-stack-glossary/current/terms.html) to get familiar with the terminology for ECE as well as other Elastic products and solutions.
+Check the [glossary](asciidocalypse://docs/docs-content/docs/reference/glossary/index.md) to get familiar with the terminology for ECE as well as other Elastic products and solutions.

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-add-custom-bundle-plugin.md b/raw-migrated-files/cloud/cloud-enterprise/ece-add-custom-bundle-plugin.md
index 9f5f448cef..6a7b6d3409 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-add-custom-bundle-plugin.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-add-custom-bundle-plugin.md
@@ -353,7 +353,7 @@ You do not need to do this step if you are using default filename and password (
    }
    ```

-4. To use this bundle, you can refer it in the [GeoIP processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html) of an ingest pipeline as `MyGeoLite2-City.mmdb` under `database_file` such as:
+4. To use this bundle, you can refer to it in the [GeoIP processor](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/enrich-processor/geoip-processor.md) of an ingest pipeline as `MyGeoLite2-City.mmdb` under `database_file` such as:

    ```sh
    ...

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-add-user-settings.md b/raw-migrated-files/cloud/cloud-enterprise/ece-add-user-settings.md
index c9c7bfc2e0..e73b0991b4 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-add-user-settings.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-add-user-settings.md
@@ -3,7 +3,7 @@ Change how Elasticsearch runs by providing your own user settings. User settings are appended to the `elasticsearch.yml` configuration file for your cluster and provide custom configuration options. Elastic Cloud Enterprise supports many of the user settings for the version of Elasticsearch that your cluster is running.

::::{tip}
-Some settings that could break your cluster if set incorrectly are blocked, such as certain zen discovery and security settings.
For examples of a few of the settings that are generally safe in cloud environments, check [Additional Examples of Supported User Settings](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-change-user-settings-examples.html) and [Editing Your User Settings](/deploy-manage/deploy/elastic-cloud/edit-stack-settings.md) that can be enabled on our Elastic Cloud hosted offering.
::::

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-api-console.md b/raw-migrated-files/cloud/cloud-enterprise/ece-api-console.md
index fd47952149..1fd58b7d58 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-api-console.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-api-console.md
@@ -7,7 +7,7 @@ API console is intended for admin purposes. Avoid running normal workload like i
::::

-You are unable to make Elastic Cloud Enterprise platform changes from the Elasticsearch API. If you want to work with the platform, check the [Elastic Cloud Enterprise RESTful API](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-restful-api.html).
+You are unable to make Elastic Cloud Enterprise platform changes from the Elasticsearch API. If you want to work with the platform, check the [Elastic Cloud Enterprise RESTful API](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-enterprise/restful-api.md).

1. [Log into the Cloud UI](../../../deploy-manage/deploy/cloud-enterprise/log-into-cloud-ui.md).
2. On the deployments page, select your deployment.

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-autoscaling.md b/raw-migrated-files/cloud/cloud-enterprise/ece-autoscaling.md
index 07b8dcf288..87742db84c 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-autoscaling.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-autoscaling.md
@@ -57,7 +57,7 @@ When past behavior on a hot tier indicates that the influx of data can increase

* Through ILM policies. For example, if a deployment has only hot nodes and autoscaling is enabled, it automatically creates warm or cold nodes if an ILM policy is trying to move data from hot to warm or cold nodes.

-On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested.
Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-create-job).

On a highly available deployment, autoscaling events are always applied to instances in each availability zone simultaneously, to ensure consistency.

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-node-js.md b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-node-js.md
index 8a9b99e67a..2ae38cdf5f 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-node-js.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-node-js.md
@@ -158,7 +158,7 @@ async function run() {
run().catch(console.log)
```

-When using the [client.index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html#_index) API, the request automatically creates the `game-of-thrones` index if it doesn’t already exist, as well as document IDs for each indexed document if they are not explicitly specified.
+When using the [client.index](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/api-reference.md#_index) API, the request automatically creates the `game-of-thrones` index if it doesn’t already exist, as well as document IDs for each indexed document if they are not explicitly specified.

## Search and modify data [ece_search_and_modify_data]

@@ -205,7 +205,7 @@ async function update() {
update().catch(console.log)
```

-This [more comprehensive list of API examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html) includes bulk operations, checking the existence of documents, updating by query, deleting, scrolling, and SQL queries. To learn more, check the complete [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html).
+This [more comprehensive list of API examples](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/examples.md) includes bulk operations, checking the existence of documents, updating by query, deleting, scrolling, and SQL queries. To learn more, check the complete [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html).

## Switch to API key authentication [ece_switch_to_api_key_authentication]

@@ -294,5 +294,5 @@ Schema
: When the example code was run, an index mapping was created automatically. The field types were selected by {{es}} based on the content seen when the first record was ingested, and updated as new fields appeared in the data. It would be more efficient to specify the fields and field types in advance to optimize performance. Refer to the Elastic Common Schema documentation and Field Type documentation when you are designing the schema for your production use cases.
Ingest -: For more advanced scenarios, this [bulk ingestion](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/bulk_examples.html) reference gives an example of the `bulk` API that makes it possible to perform multiple operations in a single call. This bulk example also explicitly specifies document IDs. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. +: For more advanced scenarios, this [bulk ingestion](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/bulk_examples.md) reference gives an example of the `bulk` API that makes it possible to perform multiple operations in a single call. This bulk example also explicitly specifies document IDs. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-python.md b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-python.md index 3f0efc5a95..ad539fb3d0 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-python.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-python.md @@ -282,7 +282,7 @@ es.get(index='lord-of-the-rings', id='2EkAzngB_pyHD3p65UMt') 'birthplace': 'The Shire'}} ``` -For frequently used API calls with the Python client, check [Examples](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/examples.html). +For frequently used API calls with the Python client, check [Examples](asciidocalypse://docs/elasticsearch-py/docs/reference/elasticsearch/elasticsearch-client-python-api/examples.md). ## Switch to API key authentication [ece_switch_to_api_key_authentication_2] @@ -357,5 +357,5 @@ Schema : When the example code is run, an index mapping is created automatically. The field types are selected by {{es}} based on the content seen when the first record was ingested, and updated as new fields appeared in the data. It would be more efficient to specify the fields and field types in advance to optimize performance. Refer to the Elastic Common Schema documentation and Field Type documentation when you design the schema for your production use cases. Ingest -: For more advanced scenarios, [Bulk helpers](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/client-helpers.html#bulk-helpers) gives examples for the `bulk` API that makes it possible to perform multiple operations in a single call. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. +: For more advanced scenarios, [Bulk helpers](asciidocalypse://docs/elasticsearch-py/docs/reference/elasticsearch/elasticsearch-client-python-api/client-helpers.md#bulk-helpers) gives examples for the `bulk` API that makes it possible to perform multiple operations in a single call. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. 
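Both client guides above funnel into the same `_bulk` endpoint, so a minimal `sh` sketch of a raw batched request may help make the idea concrete. This is an illustration only: `$ES_URL`, `$PASSWORD`, the `ingest_user` name, document IDs, and field values are placeholders, and the index name reuses the `lord-of-the-rings` example from the Python guide.

```sh
# Two index actions and one delete in a single HTTP round trip.
# $ES_URL, $PASSWORD, and ingest_user are placeholders for your deployment
# endpoint and an ingest user with reduced permissions.
curl -s -u "ingest_user:$PASSWORD" "$ES_URL/_bulk" \
  -H 'Content-Type: application/x-ndjson' \
  --data-binary $'{"index":{"_index":"lord-of-the-rings","_id":"1"}}\n{"name":"Frodo Baggins","birthplace":"The Shire"}\n{"index":{"_index":"lord-of-the-rings","_id":"2"}}\n{"name":"Samwise Gamgee","birthplace":"The Shire"}\n{"delete":{"_index":"lord-of-the-rings","_id":"1"}}\n'
```

Each action line is followed by its source document (deletes have none), and the body must end with a newline; the response reports a status per item, which is how partial failures surface.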
diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-beats-logstash.md b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-beats-logstash.md
index ec6eaeb15b..044ed8c3d4 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-beats-logstash.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-beats-logstash.md
@@ -62,7 +62,7 @@ If you have multiple servers with metrics data, repeat the following steps to co

**About Metricbeat modules**

-Metricbeat has [many modules](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html) available that collect common metrics. You can [configure additional modules](https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-metricbeat.html) as needed. For this example we’re using Metricbeat’s default configuration, which has the [System module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-system.html) enabled. The System module allows you to monitor servers with the default set of metrics: *cpu*, *load*, *memory*, *network*, *process*, *process_summary*, *socket_summary*, *filesystem*, *fsstat*, and *uptime*.
+Metricbeat has [many modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-modules.md) available that collect common metrics. You can [configure additional modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configuration-metricbeat.md) as needed. For this example we’re using Metricbeat’s default configuration, which has the [System module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-module-system.md) enabled. The System module allows you to monitor servers with the default set of metrics: *cpu*, *load*, *memory*, *network*, *process*, *process_summary*, *socket_summary*, *filesystem*, *fsstat*, and *uptime*.

**Load the Metricbeat Kibana dashboards**

@@ -89,7 +89,7 @@ sudo ./metricbeat setup \
1. Specify the Cloud ID of your Elastic Cloud Enterprise deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details.
2. Specify the username and password provided to you when creating the deployment. Make sure to keep the colon between the username and the password.
3. The four lines related to `ssl` are only used when you have a self signed certificate for your Elastic Cloud Enterprise proxy. If needed, specify the full path to the PEM formatted root certificate (Root CA) used for the Elastic Cloud Enterprise proxy. You can retrieve the certificate chain from your ECE system by following the instructions in [Get existing ECE security certificates](../../../deploy-manage/security/secure-your-elastic-cloud-enterprise-installation/manage-security-certificates.md#ece-existing-security-certificates). Save the final certificate in the chain to a file. In this command example, the file is named `elastic-ece-ca-cert.pem`.

::::{important}
-Depending on variables including the installation location, environment and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of the metricbeat.yml.
+Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of metricbeat.yml. You might encounter similar permissions hurdles as you work through multiple sections of this document. These permission requirements are there for a good reason: they are a security safeguard that prevents unauthorized access to, and modification of, key Elastic files.

@@ -138,7 +138,7 @@ The next step is to configure Filebeat to send operational data to Logstash. As

**Enable the Filebeat system module**

-Filebeat has [many modules](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-modules.html) available that collect common log types. You can [configure additional modules](https://www.elastic.co/guide/en/beats/filebeat/current/configuration-filebeat-modules.html) as needed. For this example we’re using Filebeat’s [System module](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-module-system.html). This module reads in the various system log files (with information including login successes or failures, sudo command usage, and other key usage details) based on the detected operating system. For this example, a Linux-based OS is used and Filebeat ingests logs from the */var/log/* folder. It’s important to verify that Filebeat is given permission to access your logs folder through standard file and folder permissions.
+Filebeat has [many modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-modules.md) available that collect common log types. You can [configure additional modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/configuration-filebeat-modules.md) as needed. For this example we’re using Filebeat’s [System module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-module-system.md). This module reads in the various system log files (with information including login successes or failures, sudo command usage, and other key usage details) based on the detected operating system. For this example, a Linux-based OS is used and Filebeat ingests logs from the */var/log/* folder. It’s important to verify that Filebeat is given permission to access your logs folder through standard file and folder permissions.

1. Go to */filebeat-/modules.d/* where ** is the directory where Filebeat is installed.
2. Filebeat requires at least one fileset to be enabled. In file */filebeat-/modules.d/system.yml.disabled*, under both `syslog` and `auth` set `enabled` to `true`:

@@ -195,7 +195,7 @@ Index setup finished. Loading dashboards (Kibana must be running and reachable)
Loaded dashboards
Setting up ML using setup --machine-learning is going to be removed in 8.0.0. Please use the ML app instead.
-See more: https://www.elastic.co/guide/en/machine-learning/current/index.html
+See more: /explore-analyze/machine-learning.md
Loaded machine learning job configurations
Loaded Ingest pipelines
```

@@ -245,7 +245,7 @@ Now the Filebeat and Metricbeat are set up, let’s configure a {{ls}} pipeline

1. {{ls}} listens for Beats input on the default port of 5044. Only one line is needed to do this. {{ls}} can handle input from many Beats of the same and also of varying types (Metricbeat, Filebeat, and others).
2. This sends output to the standard output, which displays through your command line interface.
This plugin enables you to verify the data before you send it to {{es}}, in a later step.
-3. Save the new *beats.conf* file in your Logstash folder. To learn more about the file format and options, check [{{ls}} Configuration Examples](https://www.elastic.co/guide/en/logstash/current/config-examples.html).
+3. Save the new *beats.conf* file in your Logstash folder. To learn more about the file format and options, check [{{ls}} Configuration Examples](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/config-examples.md).

## Output {{ls}} data to stdout [ece-beats-logstash-stdout]

@@ -388,7 +388,7 @@ In this section, you configure {{ls}} to send the Metricbeat and Filebeat data t
   ```

   1. Use the Cloud ID of your Elastic Cloud Enterprise deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details.
   - 2. the default usename is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) for information on the writer role and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) documentation.
   + 2. the default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check the [Grant access to secured resources](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/feature-roles.md) for information on the writer role and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) documentation.
   3. The cacert line is only needed if you are using a self-signed certificate.

@@ -481,9 +481,9 @@ In this section, you configure {{ls}} to send the Metricbeat and Filebeat data t

::::{note}
In this guide, you manually launch each of the Elastic stack applications through the command line interface. In production, you may prefer to configure {{ls}}, Metricbeat, and Filebeat to run as System Services.
Check the following pages for the steps to configure each application to run as a service: -* [Running {{ls}} as a service on Debian or RPM](https://www.elastic.co/guide/en/logstash/current/running-logstash.html) -* [Metricbeat and systemd](https://www.elastic.co/guide/en/beats/metricbeat/current/running-with-systemd.html) -* [Start filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-starting.html) +* [Running {{ls}} as a service on Debian or RPM](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/running-logstash.md) +* [Metricbeat and systemd](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/running-with-systemd.md) +* [Start filebeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-starting.md) :::: diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-db-logstash.md b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-db-logstash.md index 0210dd8a62..ebdd9172c7 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-db-logstash.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-db-logstash.md @@ -1,6 +1,6 @@ # Ingest data from a relational database into Elastic Cloud Enterprise [ece-getting-started-search-use-cases-db-logstash] -This guide explains how to ingest data from a relational database into Elastic Cloud Enterprise through [Logstash](https://www.elastic.co/guide/en/logstash/current/introduction.html), using the Logstash [JDBC input plugin](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html). It demonstrates how Logstash can be used to efficiently copy records and to receive updates from a relational database, and then send them into {{es}} in an Elastic Cloud Enterprise deployment. +This guide explains how to ingest data from a relational database into Elastic Cloud Enterprise through [Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md), using the Logstash [JDBC input plugin](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-jdbc.md). It demonstrates how Logstash can be used to efficiently copy records and to receive updates from a relational database, and then send them into {{es}} in an Elastic Cloud Enterprise deployment. The code and methods presented here have been tested with MySQL. They should work with other relational databases. @@ -286,7 +286,7 @@ In this section, we configure Logstash to send the MySQL data to Elasticsearch. ``` 1. Use the Cloud ID of your Elastic Cloud Enterprise deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details. - 2. the default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) for information on roles and API Keys. 
Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) documentation. + 2. the default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check [Configuring security in Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/secure-connection.md) for information on roles and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) documentation. 3. This line is only used when you have a self signed certificate for your Elastic Cloud Enterprise proxy. If needed, specify the full path to the PEM formatted root certificate (Root CA) used for the Elastic Cloud Enterprise proxy. You can retrieve the certificate chain from your ECE system by following the instructions in [Get existing ECE security certificates](../../../deploy-manage/security/secure-your-elastic-cloud-enterprise-installation/manage-security-certificates.md#ece-existing-security-certificates). Save the final certificate in the chain to a file. In this example, the file is named `elastic-ece-ca-cert.pem`. diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-node-logs.md b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-node-logs.md index 8046025fbd..6f93dbf7ba 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-node-logs.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-node-logs.md @@ -1,6 +1,6 @@ # Ingest logs from a Node.js web application using Filebeat [ece-getting-started-search-use-cases-node-logs] -This guide demonstrates how to ingest logs from a Node.js web application and deliver them securely into an Elastic Cloud Enterprise deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in Kibana as requests are made to the Node.js server. While Node.js is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html#_get_started). +This guide demonstrates how to ingest logs from a Node.js web application and deliver them securely into an Elastic Cloud Enterprise deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in Kibana as requests are made to the Node.js server. While Node.js is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](asciidocalypse://docs/ecs-logging/docs/reference/ecs/ecs-logging-overview/intro.md#_get_started). 
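To give a feel for what Filebeat will be reading in this scenario, the snippet below is a minimal sketch of a single ECS-style JSON log event. It is an illustration only: the file path, field values, and exact key layout are placeholders and depend on the ECS logging formatter version, not values taken from the guide itself.

```sh
# Append one illustrative ECS-style JSON event to a log file for Filebeat
# to pick up (the path and all field values are placeholders).
cat <<'EOF' >> logs/log.json
{"@timestamp":"2025-02-19T15:04:05.000Z","log.level":"info","message":"handled request","ecs.version":"8.10.0","http":{"request":{"method":"GET"}}}
EOF
```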
This guide presents: @@ -33,7 +33,7 @@ For the three following packages, you can create a working directory to install npm install winston ``` -* The [Elastic Common Schema (ECS) formatter](https://www.elastic.co/guide/en/ecs-logging/nodejs/current/winston.html) for the Node.js winston logger - This plugin formats your Node.js logs into an ECS structured JSON format ideally suited for ingestion into Elasticsearch. To install the ECS winston logger, run the following command in your working directory so that the package is installed in the same location as the winston package: +* The [Elastic Common Schema (ECS) formatter](asciidocalypse://docs/ecs-logging-nodejs/docs/reference/ecs/ecs-logging-nodejs/winston.md) for the Node.js winston logger - This plugin formats your Node.js logs into an ECS structured JSON format ideally suited for ingestion into Elasticsearch. To install the ECS winston logger, run the following command in your working directory so that the package is installed in the same location as the winston package: ```sh npm install @elastic/ecs-winston-format @@ -302,7 +302,7 @@ For this example, Filebeat uses the following four decoding options. json.expand_keys: true ``` -To learn more about these settings, check [JSON input configuration options](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-log.html#filebeat-input-log-config-json) and [Decode JSON fields](https://www.elastic.co/guide/en/beats/filebeat/current/decode-json-fields.html) in the Filebeat Reference. +To learn more about these settings, check [JSON input configuration options](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-input-log.md#filebeat-input-log-config-json) and [Decode JSON fields](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/decode-json-fields.md) in the Filebeat Reference. Append the four JSON decoding options to the *Filebeat inputs* section of *filebeat.yml*, so that the section now looks like this: @@ -338,7 +338,7 @@ Filebeat comes with predefined assets for parsing, indexing, and visualizing you ``` ::::{important} -Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. +Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. :::: @@ -439,7 +439,7 @@ In this command: * The *-c* flag specifies the path to the Filebeat config file. ::::{note} -Just in case the command doesn’t work as expected, check the [Filebeat quick start](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. 
+Just in case the command doesn’t work as expected, check the [Filebeat quick start](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. :::: @@ -522,5 +522,5 @@ You can add titles to the visualizations, resize and position them as you like, 2. As your final step, remember to stop Filebeat, the Node.js web server, and the client. Enter *CTRL + C* in the terminal window for each application to stop them. -You now know how to monitor log files from a Node.js web application, deliver the log event data securely into an Elastic Cloud Enterprise deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-cloud-ingest-data.html#ece-ingest-methods) to learn all about working in Elastic Cloud Enterprise. +You now know how to monitor log files from a Node.js web application, deliver the log event data securely into an Elastic Cloud Enterprise deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-overview.md) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-cloud-ingest-data.html#ece-ingest-methods) to learn all about working in Elastic Cloud Enterprise. diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-python-logs.md b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-python-logs.md index 720df788d4..8e6bd2c791 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-python-logs.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-getting-started-search-use-cases-python-logs.md @@ -1,6 +1,6 @@ # Ingest logs from a Python application using Filebeat [ece-getting-started-search-use-cases-python-logs] -This guide demonstrates how to ingest logs from a Python application and deliver them securely into an Elastic Cloud Enterprise deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in {{kib}} as they occur. While Python is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html). +This guide demonstrates how to ingest logs from a Python application and deliver them securely into an Elastic Cloud Enterprise deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in {{kib}} as they occur. While Python is used for this example, this approach to monitoring log output is applicable across many client types. 
Check the list of [available ECS logging plugins](asciidocalypse://docs/ecs-logging/docs/reference/ecs/ecs-logging-overview/intro.md).

You are going to learn how to:

@@ -14,7 +14,7 @@ You are going to learn how to:

## Prerequisites [ece_prerequisites_2]

-To complete these steps you need to have [Python](https://www.python.org/) installed on your system as well as the [Elastic Common Schema (ECS) logger](https://www.elastic.co/guide/en/ecs-logging/python/current/installation.html) for the Python logging library.
+To complete these steps, you need to have [Python](https://www.python.org/) installed on your system as well as the [Elastic Common Schema (ECS) logger](asciidocalypse://docs/ecs-logging-python/docs/reference/ecs/ecs-logging-python/installation.md) for the Python logging library.

To install *ecs-logging-python*, run:

@@ -99,7 +99,7 @@ In this step, you’ll create a Python script that generates logs in JSON format

    Having your logs written in a JSON format with ECS fields allows for easy parsing and analysis, and for standardization with other applications. A standard, easily parsable format becomes increasingly important as the volume and type of data captured in your logs expands over time.

-    Together with the standard fields included for each log entry is an extra *http.request.body.content* field. This extra field is there just to give you some additional, interesting data to work with, and also to demonstrate how you can add optional fields to your log data. Check the [ECS Field Reference](https://www.elastic.co/guide/en/ecs/current/ecs-field-reference.html) for the full list of available fields.
+    Together with the standard fields included for each log entry is an extra *http.request.body.content* field. This extra field is there just to give you some additional, interesting data to work with, and also to demonstrate how you can add optional fields to your log data. Check the [ECS Field Reference](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-field-reference.md) for the full list of available fields.

2. Let’s give the Python script a test run. Open a terminal instance in the location where you saved *elvis.py* and run the following:

@@ -193,7 +193,7 @@ For this example, Filebeat uses the following four decoding options.
  json.expand_keys: true
```

-To learn more about these settings, check [JSON input configuration options](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-log.html#filebeat-input-log-config-json) and [Decode JSON fields](https://www.elastic.co/guide/en/beats/filebeat/current/decode-json-fields.html) in the Filebeat Reference.
+To learn more about these settings, check [JSON input configuration options](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-input-log.md#filebeat-input-log-config-json) and [Decode JSON fields](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/decode-json-fields.md) in the Filebeat Reference.

Append the four JSON decoding options to the *Filebeat inputs* section of *filebeat.yml*, so that the section now looks like this:

@@ -229,7 +229,7 @@ Filebeat comes with predefined assets for parsing, indexing, and visualizing you
```

::::{important}
-Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of filebeat.yml.
You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. +Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. :::: @@ -335,7 +335,7 @@ In this command: * The *-c* flag specifies the path to the Filebeat config file. ::::{note} -Just in case the command doesn’t work as expected, check the [Filebeat quick start](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. +Just in case the command doesn’t work as expected, check the [Filebeat quick start](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. :::: @@ -413,5 +413,5 @@ You can add titles to the visualizations, resize and position them as you like, 2. As your final step, remember to stop Filebeat and the Python script. Enter *CTRL + C* in both your Filebeat terminal and in your `elvis.py` terminal. -You now know how to monitor log files from a Python application, deliver the log event data securely into an Elastic Cloud Enterprise deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-cloud-ingest-data.html#ece-ingest-methods) to learn all about working in Elastic Cloud Enterprise. +You now know how to monitor log files from a Python application, deliver the log event data securely into an Elastic Cloud Enterprise deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-overview.md) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-cloud-ingest-data.html#ece-ingest-methods) to learn all about working in Elastic Cloud Enterprise. diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-install-offline.md b/raw-migrated-files/cloud/cloud-enterprise/ece-install-offline.md index cc8f80b233..a8af0e872c 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-install-offline.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-install-offline.md @@ -10,7 +10,7 @@ Installing ECE on hosts without internet access is commonly referred to as an *o * Be part of the `docker` group to run the installation script. You should not install Elastic Cloud Enterprise as the `root` user. 
* Set up your [wildcard DNS record](../../../deploy-manage/deploy/cloud-enterprise/ece-wildcard-dns.md).
-* Set up and run a local copy of the Elastic Package Repository, otherwise your deployments with APM server and Elastic agent won’t work. Refer to the [Running EPR in airgapped environments](https://www.elastic.co/guide/en/fleet/current/air-gapped.html#air-gapped-diy-epr) documentation.
+* Set up and run a local copy of the Elastic Package Repository; otherwise, your deployments with APM server and Elastic agent won’t work. Refer to the [Running EPR in airgapped environments](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/air-gapped.md#air-gapped-diy-epr) documentation (a sample `docker run` sketch follows below).
* Deployment End-of-life (EOL) information relies on the connection to [https://www.elastic.co/support/eol.json](https://www.elastic.co/support/eol.json). If EOL information is updated, Elastic may require you to reconnect to [https://www.elastic.co/support/eol.json](https://www.elastic.co/support/eol.json) over the Internet to get this information reflected.

When you are ready to install ECE, you can proceed:

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-maintenance-mode-routing.md b/raw-migrated-files/cloud/cloud-enterprise/ece-maintenance-mode-routing.md
index 0b5fe1236b..8fb5ecb05d 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-maintenance-mode-routing.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-maintenance-mode-routing.md
@@ -7,7 +7,7 @@ The {{ecloud}} proxy routes HTTP requests to its deployment’s individual produ

It might be helpful to temporarily block upstream requests in order to protect some or all instances or products within your deployment. For example, you might stop request routing in the following cases:

* If another team within your company starts streaming new data into your production {{integrations-server}} without previous load testing, both it and {{es}} might experience performance issues. You might consider stopping routing requests on all {{integrations-server}} instances in order to protect your downstream {{es}} instance.
-* If {{es}} is being overwhelmed by upstream requests, it might experience increased response times or even become unresponsive. This might impact your ability to resize components in your deployment and increase the duration of pending plans or increase the chance of plan changes failing. Because every {{es}} node is an [implicit coordinating node](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html), you should stop routing requests across all {{es}} nodes to completely block upstream traffic.
+* If {{es}} is being overwhelmed by upstream requests, it might experience increased response times or even become unresponsive. This might impact your ability to resize components in your deployment and increase the duration of pending plans or increase the chance of plan changes failing. Because every {{es}} node is an [implicit coordinating node](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md), you should stop routing requests across all {{es}} nodes to completely block upstream traffic.
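Circling back to the offline-install prerequisite above: the local Elastic Package Repository is typically started from Elastic's distribution image. A minimal sketch, assuming Docker is available on the host; the version tag is a placeholder and should match the {{stack}} version of your deployments:

```sh
# Run a local Elastic Package Registry for air-gapped ECE installations.
# The tag 8.15.0 is a placeholder; use the version matching your deployments.
docker run -it -p 8080:8080 docker.elastic.co/package-registry/distribution:8.15.0
```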
## Considerations [ece_considerations]

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-manage-apm-settings.md b/raw-migrated-files/cloud/cloud-enterprise/ece-manage-apm-settings.md
index b0a6a2b004..3e62e7fcc6 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-manage-apm-settings.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-manage-apm-settings.md
@@ -24,7 +24,7 @@ Users running {{stack}} versions 7.16 or 7.17 need to manually configure TLS. Th

Pick one of the following options:

-1. Upload and configure a publicly signed {{es}} TLS certificates. Check [Encrypt traffic in clusters with a self-managed Fleet Server](https://www.elastic.co/guide/en/fleet/current/secure-connections.html) for details.
+1. Upload and configure a publicly signed {{es}} TLS certificate. Check [Encrypt traffic in clusters with a self-managed Fleet Server](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/secure-connections.md) for details.
2. Change the {{es}} hosts where {{agent}}s send data from the default public URL to the internal URL. In {{kib}}, navigate to **Fleet** and select the **Elastic Cloud agent policy**. Click **Fleet settings** and update the {{es}} hosts URL. For example, if the current URL is `https://123abc.us-central1.gcp.foundit.no:9244`, change it to `http://123abc.containerhost:9244`.

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-manage-integrations-server.md b/raw-migrated-files/cloud/cloud-enterprise/ece-manage-integrations-server.md
index 0310ad3aa3..dcb7819e3d 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-manage-integrations-server.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-manage-integrations-server.md
@@ -1,6 +1,6 @@
# Manage your Integrations Server [ece-manage-integrations-server]

-For deployments that are version 8.0 and later, you have the option to add a combined [Application Performance Monitoring (APM) Server](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Fleet Server](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) to your deployment. APM allows you to monitor software services and applications in real time, turning that data into documents stored in the Elasticsearch cluster. Fleet allows you to centrally manage Elastic Agents on many hosts.
+For deployments that are version 8.0 and later, you have the option to add a combined [Application Performance Monitoring (APM) Server](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Fleet Server](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) to your deployment. APM allows you to monitor software services and applications in real time, turning that data into documents stored in the Elasticsearch cluster. Fleet allows you to centrally manage Elastic Agents on many hosts.

As part of provisioning, the APM Server and Fleet Server are already configured to work with Elasticsearch and Kibana. At the end of provisioning, you are shown the secret token to configure communication between the APM Server and the backend [APM Agents](https://www.elastic.co/guide/en/apm/agent/index.html). The APM Agents get deployed within your services and applications.
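As a sketch of what that agent-side wiring can look like, most APM agents read the server URL and secret token from standard environment variables. The values below are placeholders, not output from a real deployment:

```sh
# Standard APM agent environment variables. The URL comes from the
# Integrations Server page of the deployment; the token is shown once
# at provisioning time (both values here are placeholders).
export ELASTIC_APM_SERVER_URL="https://my-deployment.apm.example.com:443"
export ELASTIC_APM_SECRET_TOKEN="<secret-token-from-provisioning>"
export ELASTIC_APM_SERVICE_NAME="my-service"
```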
diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-restful-api-examples-configuring-keystore.md b/raw-migrated-files/cloud/cloud-enterprise/ece-restful-api-examples-configuring-keystore.md
index f13a2a1553..f92b442ab1 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-restful-api-examples-configuring-keystore.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-restful-api-examples-configuring-keystore.md
@@ -9,7 +9,7 @@ To configure the keystore, you must meet the minimum criteria:

* To access the RESTful API for Elastic Cloud Enterprise, you must use your Elastic Cloud Enterprise credentials.

-To learn more about the Elasticsearch keystore, refer to the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html).
+To learn more about the Elasticsearch keystore, refer to the [Elasticsearch documentation](/deploy-manage/security/secure-settings.md).

## Steps [ece_steps_9]

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-kerberos.md b/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-kerberos.md
index 3970965fa2..99758fd330 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-kerberos.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-kerberos.md
@@ -10,7 +10,7 @@ The Kerberos credentials are valid against the deployment, not the ECE platform.

## Before you begin [ece_before_you_begin_20]

-The steps in this section require an understanding of Kerberos. To learn more about Kerberos, check our documentation on [configuring Elasticsearch for Kerberos authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/kerberos-realm.html).
+The steps in this section require an understanding of Kerberos. To learn more about Kerberos, check our documentation on [configuring Elasticsearch for Kerberos authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/kerberos.md).

## Configure the cluster to use Kerberos [ece-configure-kerberos-settings]

diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-oidc.md b/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-oidc.md
index d8713b67ec..367b4574af 100644
--- a/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-oidc.md
+++ b/raw-migrated-files/cloud/cloud-enterprise/ece-secure-clusters-oidc.md
@@ -15,7 +15,7 @@ To prepare for using OpenID Connect for authentication for deployments:

* Create or use an existing deployment. Make note of the Kibana endpoint URL; it will be referenced as `` in the following steps.
* The steps in this section require a moderate understanding of [OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.html#Authentication) in general and the Authorization Code Grant flow specifically. For more information about OpenID Connect and how it works with the Elastic Stack, check:

  - * Our [configuration guide for Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html#oidc-elasticsearch-authentication).
  + * Our [configuration guide for Elasticsearch](/deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md#oidc-elasticsearch-authentication).
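Tying the keystore section above to a concrete call: the deployment keystore can be updated through the ECE RESTful API. The following is a sketch only; the coordinator host, API key, deployment and resource IDs, and the setting name are all placeholders, and the endpoint path should be verified against the API reference for your ECE version.

```sh
# Add a secure setting to a deployment's Elasticsearch keystore via the
# ECE API (all identifiers below are placeholders).
curl -s -k -X PATCH \
  -H "Authorization: ApiKey $ECE_API_KEY" \
  -H "Content-Type: application/json" \
  "https://$COORDINATOR_HOST:12443/api/v1/deployments/$DEPLOYMENT_ID/elasticsearch/$REF_ID/keystore" \
  -d '{"secrets": {"s3.client.default.secret_key": {"as_file": false, "value": "<secret-value>"}}}'
```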
diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-JWT.md b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-JWT.md index e4e09b66a0..f3382884b6 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-JWT.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-JWT.md @@ -102,7 +102,7 @@ xpack: ::::{note} -Refer to [JWT authentication documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/jwt-auth-realm.html) for more details and examples. +Refer to [JWT authentication documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/jwt.md) for more details and examples. :::: diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-SAML.md b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-SAML.md index 42fb11a492..61e235c142 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-SAML.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-SAML.md @@ -36,7 +36,7 @@ You must edit your cluster configuration, sometimes also referred to as the depl 1. Specifies the authentication realm service. 2. Defines the SAML realm name. The SAML realm name can only contain alphanumeric characters, underscores, and hyphens. 3. The order of the SAML realm in your authentication chain. Allowed values are between `2` and `100`. Set to `2` unless you plan on configuring multiple SSO realms for this cluster. - 4. Defines the SAML attribute that is going to be mapped to the principal (username) of the authenticated user in Kibana. In this non-normative example, `nameid:persistent` maps the `NameID` with the `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` format from the Subject of the SAML Assertion. You can use any SAML attribute that carries the necessary value for your use case in this setting, such as `uid` or `mail`. Refer to [the attribute mapping documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-attributes-mapping) for details and available options. + 4. Defines the SAML attribute that is going to be mapped to the principal (username) of the authenticated user in Kibana. In this non-normative example, `nameid:persistent` maps the `NameID` with the `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` format from the Subject of the SAML Assertion. You can use any SAML attribute that carries the necessary value for your use case in this setting, such as `uid` or `mail`. Refer to [the attribute mapping documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/saml.md#saml-attributes-mapping) for details and available options. 5. Defines the SAML attribute used for role mapping when configured in Kibana. Common choices are `groups` or `roles`. The values for both `attributes.principal` and `attributes.groups` depend on the IdP provider, so be sure to review their documentation. Refer to [the attribute mapping documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-attributes-mapping) for details and available options. 6. The file path or the HTTPS URL where your IdP metadata is available, such as `https://idpurl.com/sso/saml/metadata`. If you configure a URL, you need to ensure that your Elasticsearch cluster can access it. 7. The SAML EntityID of your IdP. This can be read from the configuration page of the IdP, or its SAML metadata, such as `https://idpurl.com/entity_id`.
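For orientation, the following is a minimal, non-normative sketch of the SAML realm block that callouts like these annotate in the `elasticsearch.yml` user settings. The realm name `saml-realm-name` and all values are illustrative placeholders; the numbered comments correspond to the callouts above.

```sh
xpack:
  security:
    authc:
      realms:
        saml:                 # <1> authentication realm service
          saml-realm-name:    # <2> realm name: alphanumerics, underscores, hyphens
            order: 2          # <3> position in the authentication chain
            attributes.principal: "nameid:persistent"  # <4> attribute mapped to the username
            attributes.groups: "groups"                # <5> attribute used for role mapping
            idp.metadata.path: "https://idpurl.com/sso/saml/metadata"  # <6> IdP metadata URL or file path
            idp.entity_id: "https://idpurl.com/entity_id"              # <7> SAML EntityID of your IdP
```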
diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ad.md b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ad.md index 77f7b34e0c..fcd06d7d0d 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ad.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ad.md @@ -5,7 +5,7 @@ These steps show how you can secure your {{es}} clusters and Kibana instances wi ## Before you begin [ece_before_you_begin_18] -To learn more about how securing {{es}} clusters with Active Directory works, check [Active Directory user authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/active-directory-realm.html). +To learn more about how securing {{es}} clusters with Active Directory works, check [Active Directory user authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/active-directory.md). ::::{note} The AD credentials are valid against the deployment, not the ECE platform. You can configure [role-based access control](../../../deploy-manage/users-roles/cloud-enterprise-orchestrator/manage-users-roles.md) for the platform separately. @@ -177,7 +177,7 @@ If the keystore is also password protected (which isn’t typical for keystores ## Mapping Active Directory groups to roles [ece-securing-clusters-ad-role-mapping] -You have two ways of mapping Active Directory groups to roles for your users. The preferred one is to use the [role mapping API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping). If for some reason this is not possible, you can use a [role mapping file](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-roles.html) to specify the mappings instead. +You have two ways of mapping Active Directory groups to roles for your users. The preferred one is to use the [role mapping API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping). If for some reason this is not possible, you can use a [role mapping file](/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md) to specify the mappings instead. ::::{important} Only Active Directory security groups are supported. You cannot map distribution groups to roles. diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ldap.md b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ldap.md index 3ade007882..72b7e44d82 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ldap.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters-ldap.md @@ -5,7 +5,7 @@ These steps show how you can secure your {{es}} clusters and Kibana instances wi ## Before you begin [ece_before_you_begin_17] -To learn more about how securing {{es}} clusters with LDAP works, check [LDAP user authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/ldap-realm.html). +To learn more about how securing {{es}} clusters with LDAP works, check [LDAP user authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/ldap.md). ::::{note} The LDAP credentials are valid against the deployment, not the ECE platform. You can configure [role-based access control](../../../deploy-manage/users-roles/cloud-enterprise-orchestrator/manage-users-roles.md) for the platform separately. 
@@ -175,7 +175,7 @@ If your CA certificate is available as a `JKS` or `PKCS#12` keystore, you can up ## Mapping LDAP groups to roles [ece-securing-clusters-ldap-role-mapping] -You have two ways of mapping LDAP groups to roles for your users. The preferred one is to use the [role mapping API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping). If for some reason this is not possible, you can use a [role mapping file](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-roles.html) to specify the mappings instead. +You have two ways of mapping LDAP groups to roles for your users. The preferred one is to use the [role mapping API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping). If for some reason this is not possible, you can use a [role mapping file](/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md) to specify the mappings instead. ### Using the Role Mapping API [ece_using_the_role_mapping_api] diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters.md b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters.md index b53d3ae44b..83899715d9 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-securing-clusters.md @@ -7,7 +7,7 @@ Elastic Cloud Enterprise supports most of the security features that are part of * Reset the [`elastic` user password](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md). * Use third-party authentication providers like [SAML](../../../deploy-manage/users-roles/cluster-or-deployment-auth/saml.md), [LDAP](../../../deploy-manage/users-roles/cluster-or-deployment-auth/ldap.md), [Active Directory](../../../deploy-manage/users-roles/cluster-or-deployment-auth/active-directory.md), [OpenID Connect](../../../deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md), or [Kerberos](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kerberos.md) to provide dynamic [role mappings](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md) for role-based or attribute-based access control. * Use {{kib}} Spaces and roles to [secure access to {{kib}}](../../../deploy-manage/users-roles/cluster-or-deployment-auth/quickstart.md). - * Authorize and authenticate service accounts for {{beats}} by [granting access using API keys](https://www.elastic.co/guide/en/beats/filebeat/current/beats-api-keys.html). + * Authorize and authenticate service accounts for {{beats}} by [granting access using API keys](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/beats-api-keys.md). * Block unwanted traffic with a [traffic filter](../../../deploy-manage/security/traffic-filtering.md). diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-traffic-filtering-deployment-configuration.md b/raw-migrated-files/cloud/cloud-enterprise/ece-traffic-filtering-deployment-configuration.md index 390bcf7952..b6c7e97dcb 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-traffic-filtering-deployment-configuration.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-traffic-filtering-deployment-configuration.md @@ -101,7 +101,7 @@ Follow the instructions that match your use case: ## Troubleshooting [ece-traffic-filter-troubleshooting] -This section offers suggestions on how to troubleshoot your traffic filters.
Before you start make sure you check the [Limitations and known problems](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-limitations.html). +This section offers suggestions on how to troubleshoot your traffic filters. Before you start, make sure you check the [Limitations and known problems](asciidocalypse://docs/cloud/docs/release-notes/known-issues/cloud-enterprise.md). ### Review the rule sets associated with a deployment [ece-review-rule-sets] diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade-deployment.md b/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade-deployment.md index a68e62e651..21603a73d0 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade-deployment.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade-deployment.md @@ -43,7 +43,7 @@ To upgrade a cluster in Elastic Cloud Enterprise: 4. Select one of the available software versions. Let the user interface guide you through the steps for upgrading a deployment. When you save your changes, your deployment configuration is updated to the new version. ::::{tip} - You cannot downgrade after upgrading, so plan ahead to make sure that your applications still work after upgrading. For more information on changes that might affect your applications, check [Breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes.html). + You cannot downgrade after upgrading, so plan ahead to make sure that your applications still work after upgrading. For more information on changes that might affect your applications, check [Breaking changes](asciidocalypse://docs/elasticsearch/docs/release-notes/breaking-changes/elasticsearch.md). :::: 5. If you are upgrading to version 6.6 or earlier, major upgrades require a full cluster restart to complete the upgrade process. diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade.md b/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade.md index 5d77afeff0..dc5dad8a1e 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece-upgrade.md @@ -24,7 +24,7 @@ The following table shows the recommended upgrade paths from older {{ece}} versi ## The upgrade process [ece-upgrade-overview] -Upgrading Elastic Cloud Enterprise works by replacing the [containers](https://www.elastic.co/guide/en/elastic-stack-glossary/current/terms.html#glossary-container) that ECE itself requires to run on each host. Upgrading ECE does not touch any of the containers that run Elasticsearch clusters and Kibana instances. Each container that needs to be upgraded is renamed and stopped, followed by the creation of a new container with an upgraded instance of the ECE software and its dependencies. When the upgrade process has completed successfully, it cleans up after itself and removes the old containers. +Upgrading Elastic Cloud Enterprise works by replacing the [containers](asciidocalypse://docs/docs-content/docs/reference/glossary/index.md#glossary-container) that ECE itself requires to run on each host. Upgrading ECE does not touch any of the containers that run Elasticsearch clusters and Kibana instances. Each container that needs to be upgraded is renamed and stopped, followed by the creation of a new container with an upgraded instance of the ECE software and its dependencies. When the upgrade process has completed successfully, it cleans up after itself and removes the old containers.
The upgrade process creates a `frc-upgraders-monitor` container on the host where you initiate the process; this container performs the following actions: diff --git a/raw-migrated-files/cloud/cloud-enterprise/ece_optional_settings.md b/raw-migrated-files/cloud/cloud-enterprise/ece_optional_settings.md index 186194d970..feecd44dcc 100644 --- a/raw-migrated-files/cloud/cloud-enterprise/ece_optional_settings.md +++ b/raw-migrated-files/cloud/cloud-enterprise/ece_optional_settings.md @@ -5,5 +5,5 @@ The following optional realm settings are supported: * `force_authn` Specifies whether to set the `ForceAuthn` attribute when requesting that the IdP authenticate the current user. If set to `true`, the IdP is required to verify the user’s identity, irrespective of any existing sessions they might have. Defaults to `false`. * `idp.use_single_logout` Indicates whether to utilise the Identity Provider’s `<SingleLogoutService>` (if one exists in the IdP metadata file). Defaults to `true`. -After completing these steps, you can log in to Kibana by authenticating against your SAML IdP. If you encounter any issues with the configuration, refer to the [SAML troubleshooting page](https://www.elastic.co/guide/en/elasticsearch/reference/current/trb-security-saml.html) which contains information about common issues and suggestions for their resolution. +After completing these steps, you can log in to Kibana by authenticating against your SAML IdP. If you encounter any issues with the configuration, refer to the [SAML troubleshooting page](/troubleshoot/elasticsearch/security/trb-security-saml.md), which contains information about common issues and suggestions for their resolution. diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-add-user-settings.md b/raw-migrated-files/cloud/cloud-heroku/ech-add-user-settings.md index c5382a5132..3c7e6f0ea2 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-add-user-settings.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-add-user-settings.md @@ -35,7 +35,7 @@ Elasticsearch Add-On for Heroku supports the following `elasticsearch.yml` setti The following general settings are supported: $$$http-cors-settings$$$`http.cors.*` -: Enables cross-origin resource sharing (CORS) settings for the [HTTP module](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html). +: Enables cross-origin resource sharing (CORS) settings for the [HTTP module](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md). ::::{note} If your use case depends on the ability to receive CORS requests and you have a cluster that was provisioned prior to January 25th 2019, you must manually set `http.cors.enabled` to `true` and allow a specific set of hosts with `http.cors.allow-origin`. Applying these changes in your Elasticsearch configuration allows cross-origin resource sharing requests. @@ -61,7 +61,7 @@ $$$http-cors-settings$$$`http.cors.*` : To learn more on how to configure reindex SSL user settings, check [configuring reindex SSL parameters](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex). `script.painless.regex.enabled` -: Enables [regular expressions](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-walkthrough.html#modules-scripting-painless-regex) for the Painless scripting language.
+: Enables [regular expressions](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/brief-painless-walkthrough.md#modules-scripting-painless-regex) for the Painless scripting language. `action.auto_create_index` : [Automatically create index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) if it doesn’t already exist. @@ -94,7 +94,7 @@ $$$http-cors-settings$$$`http.cors.*` The following circuit breaker settings are supported: `indices.breaker.total.limit` -: Configures [the parent circuit breaker settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html#parent-circuit-breaker). +: Configures [the parent circuit breaker settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/circuit-breaker-settings.md#parent-circuit-breaker). `indices.breaker.fielddata.limit` : Configures [the limit for the fielddata breaker](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html#fielddata-circuit-breaker). @@ -114,7 +114,7 @@ The following circuit breaker settings are supported: The following indexing pressure settings are supported: `indexing_pressure.memory.limit` -: Configures [the indexing pressure settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-indexing-pressure.html#indexing-pressure-settings). +: Configures [the indexing pressure settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/indexing-pressure-settings.md#indexing-pressure-settings). ### X-Pack [echx_pack] @@ -128,28 +128,28 @@ The following indexing pressure settings are supported: #### All supported versions [echall_supported_versions] `xpack.ml.inference_model.time_to_live` -: Sets the duration of time that the trained models are cached. Check [{{ml-cap}} settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html). +: Sets the duration of time that the trained models are cached. Check [{{ml-cap}} settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md). `xpack.security.loginAssistanceMessage` : Adds a message to the login screen. Useful for displaying corporate messages. `xpack.security.authc.anonymous.*` -: To learn more on how to enable anonymous access, check [Enabling anonymous access](https://www.elastic.co/guide/en/elasticsearch/reference/current/anonymous-access.html) +: To learn more on how to enable anonymous access, check [Enabling anonymous access](/deploy-manage/users-roles/cluster-or-deployment-auth/anonymous-access.md). `xpack.notification.slack` -: Configures [Slack notification settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/actions-slack.html#actions-slack). Note that you need to add `secure_url` as a [secret value to the keystore](../../../deploy-manage/security/secure-settings.md). +: Configures [Slack notification settings](/explore-analyze/alerts-cases/watcher/actions-slack.md). Note that you need to add `secure_url` as a [secret value to the keystore](../../../deploy-manage/security/secure-settings.md). `xpack.notification.pagerduty` -: Configures [PagerDuty notification settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/actions-pagerduty.html#configuring-pagerduty). +: Configures [PagerDuty notification settings](/explore-analyze/alerts-cases/watcher/actions-pagerduty.md#configuring-pagerduty).
`xpack.watcher.trigger.schedule.engine` -: Defines when the watch should start, based on date and time [Learn more](https://www.elastic.co/guide/en/elasticsearch/reference/current/trigger-schedule.html). +: Defines when the watch should start, based on date and time. [Learn more](/explore-analyze/alerts-cases/watcher/trigger-schedule.md). `xpack.notification.email.html.sanitization.*` -: Enables [email notification settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html) to sanitize HTML elements in emails that are sent. +: Enables [email notification settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md) to sanitize HTML elements in emails that are sent. `xpack.monitoring.collection.interval` -: Controls [how often data samples are collected](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#monitoring-collection-settings). +: Controls [how often data samples are collected](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md#monitoring-collection-settings). `xpack.monitoring.collection.min_interval_seconds` : Specifies the minimum number of seconds that a time bucket in a chart can represent. If you modify the `xpack.monitoring.collection.interval`, use the same value in this setting. @@ -197,7 +197,7 @@ The following search settings are supported: The following disk-based allocation settings are supported: `cluster.routing.allocation.disk.threshold_enabled` -: Enable or disable [disk allocation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#disk-based-shard-allocation) decider and defaults to `true`. +: Enables or disables the [disk allocation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md#disk-based-shard-allocation) decider. Defaults to `true`. `cluster.routing.allocation.disk.watermark.low` : Configures [disk-based shard allocation’s low watermark](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#disk-based-shard-allocation). diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-adding-plugins.md b/raw-migrated-files/cloud/cloud-heroku/ech-adding-plugins.md index 0692739bde..6e9ff42b24 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-adding-plugins.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-adding-plugins.md @@ -13,7 +13,7 @@ There are two ways to add plugins to a deployment in Elasticsearch Add-On for He * [Enable one of the official plugins already available in Elasticsearch Add-On for Heroku](../../../deploy-manage/deploy/elastic-cloud/add-plugins-provided-with-elastic-cloud-hosted.md). * [Upload a custom plugin and then enable it per deployment](../../../deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md). -Custom plugins can include the official {{es}} plugins not provided with Elasticsearch Add-On for Heroku, any of the community-sourced plugins, or [plugins that you write yourself](https://www.elastic.co/guide/en/elasticsearch/plugins/current/plugin-authors.html). Uploading custom plugins is available only to Gold, Platinum, and Enterprise subscriptions. For more information, check [Upload custom plugins and bundles](../../../deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md).
+Custom plugins can include the official {{es}} plugins not provided with Elasticsearch Add-On for Heroku, any of the community-sourced plugins, or [plugins that you write yourself](asciidocalypse://docs/elasticsearch/docs/extend/create-elasticsearch-plugins/index.md). Uploading custom plugins is available only to Gold, Platinum, and Enterprise subscriptions. For more information, check [Upload custom plugins and bundles](../../../deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md). To learn more about the official and community-sourced plugins, refer to [{{es}} Plugins and Integrations](https://www.elastic.co/guide/en/elasticsearch/plugins/current/index.html). diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-autoscaling.md b/raw-migrated-files/cloud/cloud-heroku/ech-autoscaling.md index cb987b9e21..b42cda57ca 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-autoscaling.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-autoscaling.md @@ -57,7 +57,7 @@ When past behavior on a hot tier indicates that the influx of data can increase * Through ILM policies. For example, if a deployment has only hot nodes and autoscaling is enabled, it automatically creates warm or cold nodes, if an ILM policy is trying to move data from hot to warm or cold nodes. -On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-create-job). +On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-create-job). On a highly available deployment, autoscaling events are always applied to instances in each availability zone simultaneously, to ensure consistency. 
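For reference, in environments that expose the {{es}} autoscaling policy API directly (for example, self-managed or ECK clusters; on Cloud the deciders are managed for you), the machine learning decider settings mentioned above are expressed roughly like this. The policy name and `$ES_URL` are placeholders.

```sh
# Sketch: an autoscaling policy with the ml decider; values are illustrative.
curl -X PUT "$ES_URL/_autoscaling/policy/ml-autoscaling" \
  -H 'Content-Type: application/json' \
  -d '{
    "roles": ["ml"],
    "deciders": {
      "ml": { "num_anomaly_jobs_in_queue": 5, "down_scale_delay": "1h" }
    }
  }'
```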
diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-custom-bundles.md b/raw-migrated-files/cloud/cloud-heroku/ech-custom-bundles.md index f28d917299..c4c6dccf14 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-custom-bundles.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-custom-bundles.md @@ -76,7 +76,7 @@ Bundles The dictionary `synonyms.txt` can be used as `synonyms.txt` or with the full path `/app/config/synonyms.txt` in the `synonyms_path` of the `synonym-filter`. - To learn more about analyzing with synonyms, check [Synonym token filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-tokenfilter.html) and [Formatting Synonyms](https://www.elastic.co/guide/en/elasticsearch/guide/2.x/synonym-formats.html). + To learn more about analyzing with synonyms, check [Synonym token filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-synonym-tokenfilter.md) and [Formatting Synonyms](https://www.elastic.co/guide/en/elasticsearch/guide/2.x/synonym-formats.html). **GeoIP database bundle** diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-enable-logging-and-monitoring.md b/raw-migrated-files/cloud/cloud-heroku/ech-enable-logging-and-monitoring.md index 2fdc3f58f2..62482ced4e 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-enable-logging-and-monitoring.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-enable-logging-and-monitoring.md @@ -173,7 +173,7 @@ When shipping logs to a monitoring deployment there are more logging features av #### For {{es}}: [ech-extra-logging-features-elasticsearch] * [Audit logging](../../../deploy-manage/monitor/logging-configuration/enabling-audit-logs.md) - logs security-related events on your deployment -* [Slow query and index logging](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html) - helps find and debug slow queries and indexing +* [Slow query and index logging](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/slow-log-settings.md) - helps find and debug slow queries and indexing * Verbose logging - helps debug stack issues by increasing component logs After you’ve enabled log delivery on your deployment, you can [add the Elasticsearch user settings](../../../deploy-manage/deploy/elastic-cloud/edit-stack-settings.md) to enable these features. diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-getting-started.md b/raw-migrated-files/cloud/cloud-heroku/ech-getting-started.md index 45aa133301..17e9ef97f0 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-getting-started.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-getting-started.md @@ -4,7 +4,7 @@ This documentation applies to Heroku users who want to make use of the Elasticse The add-on runs on the Elasticsearch Service and provides access to [Elasticsearch](https://www.elastic.co/products/elasticsearch), the open source, distributed, RESTful search engine. Many other features of the Elastic Stack are also readily available to Heroku users through the [Elasticsearch Add-On for Heroku console](https://cloud.elastic.co?page=docs&placement=docs-body) after you install the add-on. For example, you can use Kibana to visualize your Elasticsearch data.
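As a hypothetical sketch of that install flow, assuming the add-on slug `foundelasticsearch` from the Elements Marketplace listing and an app named `my-app` (plan names and the generated config var may differ):

```sh
# Provision the add-on and read back the generated connection URL.
heroku addons:create foundelasticsearch --app my-app
heroku config:get FOUNDELASTICSEARCH_URL --app my-app
```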
-[Elasticsearch Machine Learning](https://www.elastic.co/guide/en/machine-learning/current/index.html), [Elastic Enterprise Search](https://www.elastic.co/guide/en/enterprise-search/current/index.html), [Elastic APM](/solutions/observability/apps/application-performance-monitoring-apm.md) and [Elastic Fleet Server](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) are not supported by the Elasticsearch Add-On for Heroku. +[Elasticsearch Machine Learning](/explore-analyze/machine-learning.md), [Elastic Enterprise Search](https://www.elastic.co/guide/en/enterprise-search/current/index.html), [Elastic APM](/solutions/observability/apps/application-performance-monitoring-apm.md), and [Elastic Fleet Server](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) are not supported by the Elasticsearch Add-On for Heroku. To learn more about what plans are available for Heroku users and their cost, check the [Elasticsearch add-on](https://elements.heroku.com/addons/foundelasticsearch) in the Elements Marketplace. diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md b/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md index 910877a5a8..fb1c4144aa 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-manage-kibana-settings.md @@ -43,16 +43,16 @@ If a setting is not supported by Elasticsearch Add-On for Heroku, you will get a ### Version 8.9.0+ [echversion_8_9_0] `xpack.fleet.createArtifactsBulkBatchSize` -: Allow to configure batch size for creating and updating Fleet user artifacts. Examples include creation of Trusted Applications and Endpoint Exceptions in Security. To learn more, check [Fleet settings in Kibana](https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html). +: Allows configuring the batch size for creating and updating Fleet user artifacts. Examples include creation of Trusted Applications and Endpoint Exceptions in Security. To learn more, check [Fleet settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/fleet-settings.md). `xpack.securitySolution.maxUploadResponseActionFileBytes` -: Allow to configure the max file upload size for use with the Upload File Repsonse action available with the Defend Integration. To learn more, check [Endpoint Response actions](https://www.elastic.co/guide/en/security/current/response-actions.html). +: Allows configuring the maximum file upload size for use with the Upload File Response action available with the Defend integration. To learn more, check [Endpoint Response actions](/solutions/security/endpoint-response-actions.md). ### Version 8.7.0+ [echversion_8_7_0] `xpack.security.session.concurrentSessions.maxSessions` -: Set the maximum number of sessions each user is allowed to have active in {{kib}}. By default, no limit is applied. If set, the value of this option should be an integer between 1 and 1000. When the limit is exceeded, the oldest session is automatically invalidated. To learn more, check [Session management](https://www.elastic.co/guide/en/kibana/current/xpack-security-session-management.html#session-max-sessions). +: Set the maximum number of sessions each user is allowed to have active in {{kib}}. By default, no limit is applied. If set, the value of this option should be an integer between 1 and 1000. When the limit is exceeded, the oldest session is automatically invalidated.
To learn more, check [Session management](/deploy-manage/security/kibana-session-management.md#session-max-sessions). `server.securityResponseHeaders.crossOriginOpenerPolicy` : Controls whether the [`Cross-Origin-Opener-Policy`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cross-Origin-Opener-Policy) header is used in all responses to the client from the Kibana server. To learn more, see [Configure Kibana](https://www.elastic.co/guide/en/kibana/current/settings.html#server-securityResponseHeaders-crossOriginOpenerPolicy). @@ -224,7 +224,7 @@ If a setting is not supported by Elasticsearch Add-On for Heroku, you will get a If you are using SAML to secure your clusters, these settings are supported in Elasticsearch Add-On for Heroku. -To learn more, refer to [configuring Kibana to use SAML](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-configure-kibana). +To learn more, refer to [configuring Kibana to use SAML](/deploy-manage/users-roles/cluster-or-deployment-auth/saml.md#saml-configure-kibana). #### Version 8.0.0+ [echversion_8_0_0] @@ -299,17 +299,17 @@ If you are using OpenID Connect to secure your clusters, these settings are supp `xpack.security.authc.oidc.realm` : Specifies which OpenID Connect realm in Elasticsearch should be used. -To learn more, check [configuring Kibana to use OpenID Connect](https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html). +To learn more, check [configuring Kibana to use OpenID Connect](/deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md). ### Anonymous authentication [echanonymous_authentication] -If you want to allow anonymous authentication in Kibana, these settings are supported in Elasticsearch Add-On for Heroku. To learn more on how to enable anonymous access, check [Enabling anonymous access](https://www.elastic.co/guide/en/elasticsearch/reference/current/anonymous-access.html) and [Configuring Kibana to use anonymous authentication](https://www.elastic.co/guide/en/kibana/current/kibana-authentication.html#anonymous-authentication). +If you want to allow anonymous authentication in Kibana, these settings are supported in Elasticsearch Add-On for Heroku. To learn more on how to enable anonymous access, check [Enabling anonymous access](/deploy-manage/users-roles/cluster-or-deployment-auth/anonymous-access.md) and [Configuring Kibana to use anonymous authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/user-authentication.md#anonymous-authentication). #### Supported versions before 8.0.0 [echsupported_versions_before_8_0_0] `xpack.security.sessionTimeout` -: Specifies the session duration in milliseconds. Allows a value between 15000 (15 seconds) and 86400000 (1 day). To learn more, check [Security settings in Kibana](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html). Deprecated in versions 7.6+ and removed in versions 8.0+. +: Specifies the session duration in milliseconds. Allows a value between 15000 (15 seconds) and 86400000 (1 day). To learn more, check [Security settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md). Deprecated in versions 7.6+ and removed in versions 8.0+. #### All supported versions [echall_supported_versions_4] @@ -474,7 +474,7 @@ This setting is not available in versions 8.0.0 through 8.2.0. As such, this set : Sets the size of the ephemeral queue. Defaults to `10`. 
`xpack.actions.customHostSettings` -: An array of objects, one per host, containing the SSL/TLS settings used when executing connectors which make HTTPS and SMTP connections to the host servers. For details about using this setting, check [Alerting and action settings in Kibana](https://www.elastic.co/guide/en/kibana/current/alert-action-settings-kb.html). +: An array of objects, one per host, containing the SSL/TLS settings used when executing connectors which make HTTPS and SMTP connections to the host servers. For details about using this setting, check [Alerting and action settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/alerting-settings.md). `xpack.actions.ssl.proxyVerificationMode` : Controls the verification of the proxy server certificate that hosted-ems receives when making an outbound SSL/TLS connection to the host server. Valid values are `full`, `certificate`, and `none`. Use `full` to perform hostname verification, `certificate` to skip hostname verification, and `none` to skip verification. Default: `full`. @@ -588,7 +588,7 @@ This setting is not available in versions 8.0.0 through 8.2.0. As such, this set : When enabled, specifies the email address to receive cluster alert notifications. `xpack.monitoring.kibana.collection.interval` -: Controls [how often data samples are collected](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#monitoring-collection-settings). +: Controls [how often data samples are collected](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md#monitoring-collection-settings). `xpack.monitoring.min_interval_seconds` : Specifies the minimum number of seconds that a time bucket in a chart can represent. If you modify the `xpack.monitoring.kibana.collection.interval`, use the same value in this setting. @@ -599,7 +599,7 @@ This setting is not available in versions 8.0.0 through 8.2.0. As such, this set `xpack.ml.enabled` : Set to true (default) to enable machine learning. - If set to `false` in `kibana.yml`, the machine learning icon is hidden in this Kibana instance. If `xpack.ml.enabled` is set to `true` in `elasticsearch.yml`, however, you can still use the machine learning APIs. To disable machine learning entirely, check the [Elasticsearch Machine Learning Settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html). + If set to `false` in `kibana.yml`, the machine learning icon is hidden in this Kibana instance. If `xpack.ml.enabled` is set to `true` in `elasticsearch.yml`, however, you can still use the machine learning APIs. To disable machine learning entirely, check the [Elasticsearch Machine Learning Settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md). #### Content security policy configuration [echcontent_security_policy_configuration] @@ -692,7 +692,7 @@ Each method has its own unique limitations which are important to understand. `xpack.reporting.csv.scroll.duration` -: Amount of [time](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units) allowed before {{kib}} cleans the scroll context during a CSV export. Valid option is either `auto` or [time](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units), Defaults to `30s`. 
+: Amount of [time](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units) allowed before {{kib}} cleans the scroll context during a CSV export. Valid option is either `auto` or a [time](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units) value. Defaults to `30s`. ::::{note} Support for the option `auto` was included here: when the config value is set to `auto`, the scroll context will be preserved for as long as possible, before the report task is terminated due to the limits of `xpack.reporting.queue.timeout`. @@ -929,7 +929,7 @@ The following APM settings are supported in Kibana: `xpack.apm.ui.maxTraceItems` : Maximum number of child items displayed when viewing trace details. - Defaults to `1000`. Any positive value is valid. To learn more, check [APM settings in Kibana](https://www.elastic.co/guide/en/kibana/current/apm-settings-kb.html). + Defaults to `1000`. Any positive value is valid. To learn more, check [APM settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/apm-settings.md). `xpack.apm.ui.enabled` diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-monitoring-setup.md b/raw-migrated-files/cloud/cloud-heroku/ech-monitoring-setup.md index 86831b6007..997aef8bc7 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-monitoring-setup.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-monitoring-setup.md @@ -27,7 +27,7 @@ After you have created a new deployment, you should enable shipping logs and met 5. Select **Save**. -Optionally, turn on [audit logging](https://www.elastic.co/guide/en/elasticsearch/reference/current/auditing-settings.html) to capture security-related events, such as authentication failures, refused connections, and data-access events through the proxy. To turn on audit logging, [edit your deployment’s elasticsearch.yml file](../../../deploy-manage/deploy/elastic-cloud/edit-stack-settings.md) to add these lines: +Optionally, turn on [audit logging](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/auding-settings.md) to capture security-related events, such as authentication failures, refused connections, and data-access events through the proxy. To turn on audit logging, [edit your deployment’s elasticsearch.yml file](../../../deploy-manage/deploy/elastic-cloud/edit-stack-settings.md) to add these lines: ```sh xpack.security.audit.enabled: true @@ -40,7 +40,7 @@ The last two lines are commented out for now but left there as placeholders to e ## View your deployment health [echview_your_deployment_health] -From the monitoring deployment, you can now view your deployment’s health in Kibana using [Stack Monitoring](https://www.elastic.co/guide/en/kibana/current/xpack-monitoring.html): +From the monitoring deployment, you can now view your deployment’s health in Kibana using [Stack Monitoring](/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md): 1. Select the **Kibana** link for your monitoring deployment. 2. From the app menu or the search bar, open **Stack Monitoring**. @@ -50,7 +50,7 @@ From the monitoring deployment, you can now view your deployment’s health in K :::: -To learn more about what [Elasticsearch monitoring metrics](https://www.elastic.co/guide/en/kibana/current/elasticsearch-metrics.html) are available, take a look at the different tabs.
For example: +To learn more about what [Elasticsearch monitoring metrics](/deploy-manage/monitor/monitoring-data/elasticsearch-metrics.md) are available, take a look at the different tabs. For example: * The **Overview** tab includes information about the search and indexing performance of Elasticsearch and also provides log entries. * The **Nodes** tab can help you monitor cluster CPU performance, JVM strain, and free disk space. @@ -70,13 +70,13 @@ Some [performance metrics](/deploy-manage/monitor/monitoring-data/access-perform If you suspect a performance issue, you can use your monitoring deployment to investigate what is going on in Kibana: -* Through **Observability** > **Logs** > **Stream**: This page shows errors in real-time and is part of the same logs Elastic Support reviews when a deployment experiences issues. Check [Tail log files](https://www.elastic.co/guide/en/observability/current/tail-logs.html). -* Through **Discover**: This page is a good option for investigating widespread historical patterns. Check [Discover](https://www.elastic.co/guide/en/kibana/current/discover.html). +* Through **Observability** > **Logs** > **Stream**: This page shows errors in real time and is part of the same logs Elastic Support reviews when a deployment experiences issues. Check [Tail log files](/solutions/observability/logs/logs-stream.md). +* Through **Discover**: This page is a good option for investigating widespread historical patterns. Check [Discover](/explore-analyze/discover.md). Discover requires a quick setup in Kibana: 1. Go to **Stack Management** > **Data Views** (formerly *Index Patterns*). - 2. Create a [data view](https://www.elastic.co/guide/en/kibana/current/data-views.html) for `elastic-cloud-logs*` and set **Timestamp field** to `@timestamp`: + 2. Create a [data view](/explore-analyze/find-and-organize/data-views.md) for `elastic-cloud-logs*` and set **Timestamp field** to `@timestamp`: :::{image} ../../../images/cloud-heroku-ec-ce-monitoring-logs.png :alt: Create data view example in Kibana @@ -104,14 +104,14 @@ You will get this request reported as a new log. Audit logs do not currently rep ## Get notified [echget_notified] -You should take advantage of the default [Elastic Stack monitoring alerts](https://www.elastic.co/guide/en/kibana/current/kibana-alerts.html) that are available out-of-the-box. You don’t have to do anything other than enable shipping logs and metrics to have them made available to you (which you did earlier on). +You should take advantage of the default [Elastic Stack monitoring alerts](/deploy-manage/monitor/monitoring-data/kibana-alerts.md) that are available out of the box. You don’t have to do anything other than enable shipping logs and metrics to have them made available to you (which you did earlier on). On top of these default alerts that write to indices you can investigate, you might want to add some custom actions, such as a [connector](https://www.elastic.co/guide/en/kibana/current/action-types.html) for Slack notifications. To set up these notifications, you first configure a Slack connector and then append it to the default alerts and actions. From Kibana: 1. Go to **Stack Management** > **Rules and Connectors** > **Connectors** and create your Slack connector: 1. Select **Slack**. - 2. [Create a Slack Webhook URL](https://www.elastic.co/guide/en/kibana/current/slack-action-type.html#configuring-slack) and paste it into the **Webhook URL** field. + 2.
[Create a Slack Webhook URL](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/slack-action-type.md#configuring-slack) and paste it into the **Webhook URL** field. 3. Select **Save**. 2. Go to **Stack Monitoring** and select **Enter setup mode**. @@ -145,7 +145,7 @@ When issues come up that you need to troubleshoot, you’ll frequently start with You can run this query and many others from the API consoles available via: -* **Kibana** > **Dev Tools**. Check [Run Elasticsearch API requests](https://www.elastic.co/guide/en/kibana/current/console-kibana.html). +* **Kibana** > **Dev Tools**. Check [Run Elasticsearch API requests](/explore-analyze/query-filter/tools/console.md). * **Elastic Cloud** > **Deployment** > **Elasticsearch** > **API Console**. Check [Access the Elasticsearch API console](../../../deploy-manage/deploy/elastic-cloud/ech-api-console.md). You can also learn more about the queries you should run for your deployment by reading our blog [Managing and Troubleshooting Elasticsearch Memory](https://www.elastic.co/blog/managing-and-troubleshooting-elasticsearch-memory). diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-password-reset.md b/raw-migrated-files/cloud/cloud-heroku/ech-password-reset.md index 1fc1aa49dc..07a1fc5a3b 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-password-reset.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-password-reset.md @@ -13,7 +13,7 @@ Resetting the `elastic` user password does not interfere with Marketplace integr ::::{note} -The `elastic` user should be not be used unless you have no other way to access your deployment. [Create API keys for ingesting data](https://www.elastic.co/guide/en/beats/filebeat/current/beats-api-keys.html), and create user accounts with [appropriate roles for user access](../../../deploy-manage/users-roles/cluster-or-deployment-auth/quickstart.md). +The `elastic` user should not be used unless you have no other way to access your deployment. [Create API keys for ingesting data](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/beats-api-keys.md), and create user accounts with [appropriate roles for user access](../../../deploy-manage/users-roles/cluster-or-deployment-auth/quickstart.md). :::: diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-kerberos.md b/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-kerberos.md index 9f84e0cf63..d71f77b03e 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-kerberos.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-kerberos.md @@ -5,7 +5,7 @@ You can secure your Elasticsearch clusters and Kibana instances in a deployment ## Before you begin [echbefore_you_begin_10] -The steps in this section require an understanding of Kerberos. To learn more about Kerberos, check our documentation on [configuring Elasticsearch for Kerberos authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/kerberos-realm.html). +The steps in this section require an understanding of Kerberos. To learn more about Kerberos, check our documentation on [configuring Elasticsearch for Kerberos authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/kerberos.md).
## Configure the cluster to use Kerberos [ech-configure-kerberos-settings] diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-oidc.md b/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-oidc.md index bf1818f72e..02a804276c 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-oidc.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-secure-clusters-oidc.md @@ -10,7 +10,7 @@ To prepare for using OpenID Connect for authentication for deployments: * Create or use an existing deployment. Make note of the Kibana endpoint URL; it will be referenced as `` in the following steps. * The steps in this section require a moderate understanding of [OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.md#Authentication) in general and the Authorization Code Grant flow specifically. For more information about OpenID Connect and how it works with the Elastic Stack, check: - * Our [configuration guide for Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html#oidc-elasticsearch-authentication). + * Our [configuration guide for Elasticsearch](/deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md#oidc-elasticsearch-authentication). diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-JWT.md b/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-JWT.md index 6800c922a2..bfa9fec676 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-JWT.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-JWT.md @@ -97,7 +97,7 @@ xpack: ::::{note} -Refer to [JWT authentication documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/jwt-auth-realm.html) for more details and examples. +Refer to [JWT authentication documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/jwt.md) for more details and examples. :::: diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-SAML.md b/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-SAML.md index d66b27fb44..b40b97adb7 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-SAML.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-securing-clusters-SAML.md @@ -31,7 +31,7 @@ You must edit your cluster configuration, sometimes also referred to as the depl 1. Specifies the authentication realm service. 2. Defines the SAML realm name. The SAML realm name can only contain alphanumeric characters, underscores, and hyphens. 3. The order of the SAML realm in your authentication chain. Allowed values are between `2` and `100`. Set to `2` unless you plan on configuring multiple SSO realms for this cluster. - 4. Defines the SAML attribute that is going to be mapped to the principal (username) of the authenticated user in Kibana. In this non-normative example, `nameid:persistent` maps the `NameID` with the `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` format from the Subject of the SAML Assertion. You can use any SAML attribute that carries the necessary value for your use case in this setting, such as `uid` or `mail`. Refer to [the attribute mapping documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-attributes-mapping) for details and available options. + 4. Defines the SAML attribute that is going to be mapped to the principal (username) of the authenticated user in Kibana.
In this non-normative example, `nameid:persistent` maps the `NameID` with the `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` format from the Subject of the SAML Assertion. You can use any SAML attribute that carries the necessary value for your use case in this setting, such as `uid` or `mail`. Refer to [the attribute mapping documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/saml.md#saml-attributes-mapping) for details and available options. 5. Defines the SAML attribute used for role mapping when configured in Kibana. Common choices are `groups` or `roles`. The values for both `attributes.principal` and `attributes.groups` depend on the IdP provider, so be sure to review their documentation. Refer to [the attribute mapping documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-attributes-mapping) for details and available options. 6. The file path or the HTTPS URL where your IdP metadata is available, such as `https://idpurl.com/sso/saml/metadata`. If you configure a URL, you need to ensure that your Elasticsearch cluster can access it. 7. The SAML EntityID of your IdP. This can be read from the configuration page of the IdP, or its SAML metadata, such as `https://idpurl.com/entity_id`. diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-security.md b/raw-migrated-files/cloud/cloud-heroku/ech-security.md index 79c3bdc540..9eebeec8c8 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-security.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-security.md @@ -7,7 +7,7 @@ The security of Elasticsearch Add-On for Heroku is described on the [{{ecloud}} * Reset the [`elastic` user password](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md). * Use third-party authentication providers and services like [SAML](../../../deploy-manage/users-roles/cluster-or-deployment-auth/saml.md), [OpenID Connect](../../../deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md), or [Kerberos](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kerberos.md) to provide dynamic [role mappings](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md) for role-based or attribute-based access control. * Use {{kib}} Spaces and roles to [secure access to {{kib}}](../../../deploy-manage/users-roles/cluster-or-deployment-auth/quickstart.md). - * Authorize and authenticate service accounts for {{beats}} by [granting access using API keys](https://www.elastic.co/guide/en/beats/filebeat/current/beats-api-keys.html). + * Authorize and authenticate service accounts for {{beats}} by [granting access using API keys](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/beats-api-keys.md). * Roles can provide full or read-only access to your data and can be created in Kibana or directly in Elasticsearch. Check [defining roles](../../../deploy-manage/users-roles/cluster-or-deployment-auth/defining-roles.md) for full details.
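To make the API keys bullet above concrete, here is a minimal sketch of creating an ingest-scoped API key with the {{es}} create API key endpoint; the key name, index pattern, and privileges are illustrative placeholders, and `$ES_URL` stands in for your deployment endpoint.

```sh
# Sketch: create an API key scoped for Beats ingestion (illustrative values).
curl -X POST "$ES_URL/_security/api_key" \
  -H 'Content-Type: application/json' \
  -d '{
    "name": "filebeat-ingest",
    "role_descriptors": {
      "filebeat_writer": {
        "cluster": ["monitor", "read_ilm"],
        "index": [
          {
            "names": ["filebeat-*"],
            "privileges": ["create_doc", "view_index_metadata"]
          }
        ]
      }
    }
  }'
```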
diff --git a/raw-migrated-files/cloud/cloud-heroku/echsign-outgoing-saml-message.md b/raw-migrated-files/cloud/cloud-heroku/echsign-outgoing-saml-message.md index 4be3720ab3..206ea4b125 100644 --- a/raw-migrated-files/cloud/cloud-heroku/echsign-outgoing-saml-message.md +++ b/raw-migrated-files/cloud/cloud-heroku/echsign-outgoing-saml-message.md @@ -59,6 +59,6 @@ The following optional realm settings are supported: * `force_authn` Specifies whether to set the `ForceAuthn` attribute when requesting that the IdP authenticate the current user. If set to `true`, the IdP is required to verify the user’s identity, irrespective of any existing sessions they might have. Defaults to `false`. * `idp.use_single_logout` Indicates whether to utilise the Identity Provider’s `` (if one exists in the IdP metadata file). Defaults to `true`. -After completing these steps, you can log in to Kibana by authenticating against your SAML IdP. If you encounter any issues with the configuration, refer to the [SAML troubleshooting page](https://www.elastic.co/guide/en/elasticsearch/reference/current/trb-security-saml.html) which contains information about common issues and suggestions for their resolution. +After completing these steps, you can log in to Kibana by authenticating against your SAML IdP. If you encounter any issues with the configuration, refer to the [SAML troubleshooting page](/troubleshoot/elasticsearch/security/trb-security-saml.md) which contains information about common issues and suggestions for their resolution. diff --git a/raw-migrated-files/cloud/cloud/ec-about.md b/raw-migrated-files/cloud/cloud/ec-about.md index f10ec3798d..dca303eefd 100644 --- a/raw-migrated-files/cloud/cloud/ec-about.md +++ b/raw-migrated-files/cloud/cloud/ec-about.md @@ -4,8 +4,8 @@ The information in this section covers: * [Subscription Levels](../../../deploy-manage/license.md) * [Version Policy](../../../deploy-manage/deploy/elastic-cloud/available-stack-versions.md) -* [Elasticsearch Service Hardware](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html) -* [Elasticsearch Service Regions](https://www.elastic.co/guide/en/cloud/current/ec-reference-regions.html) +* [Elasticsearch Service Hardware](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md) +* [Elasticsearch Service Regions](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/regions.md) * [Service Status](../../../deploy-manage/cloud-organization/service-status.md) * [Getting help](../../../troubleshoot/index.md) * [Restrictions and known problems](../../../deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md) diff --git a/raw-migrated-files/cloud/cloud/ec-add-user-settings.md b/raw-migrated-files/cloud/cloud/ec-add-user-settings.md index 515be42eaa..c800c0016e 100644 --- a/raw-migrated-files/cloud/cloud/ec-add-user-settings.md +++ b/raw-migrated-files/cloud/cloud/ec-add-user-settings.md @@ -35,7 +35,7 @@ Elasticsearch Service supports the following `elasticsearch.yml` settings. The following general settings are supported: $$$http-cors-settings$$$`http.cors.*` -: Enables cross-origin resource sharing (CORS) settings for the [HTTP module](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html). +: Enables cross-origin resource sharing (CORS) settings for the [HTTP module](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/networking-settings.md). 
::::{note} If your use case depends on the ability to receive CORS requests and you have a cluster that was provisioned prior to January 25th 2019, you must manually set `http.cors.enabled` to `true` and allow a specific set of hosts with `http.cors.allow-origin`. Applying these changes in your Elasticsearch configuration allows cross-origin resource sharing requests. @@ -61,7 +61,7 @@ $$$http-cors-settings$$$`http.cors.*` : To learn more on how to configure reindex SSL user settings, check [configuring reindex SSL parameters](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex). `script.painless.regex.enabled` -: Enables [regular expressions](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-walkthrough.html#modules-scripting-painless-regex) for the Painless scripting language. +: Enables [regular expressions](asciidocalypse://docs/elasticsearch/docs/reference/scripting-languages/painless/brief-painless-walkthrough.md#modules-scripting-painless-regex) for the Painless scripting language. `action.auto_create_index` : [Automatically create index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) if it doesn’t already exist. @@ -94,7 +94,7 @@ $$$http-cors-settings$$$`http.cors.*` The following circuit breaker settings are supported: `indices.breaker.total.limit` -: Configures [the parent circuit breaker settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html#parent-circuit-breaker). +: Configures [the parent circuit breaker settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/circuit-breaker-settings.md#parent-circuit-breaker). `indices.breaker.fielddata.limit` : Configures [the limit for the fielddata breaker](https://www.elastic.co/guide/en/elasticsearch/reference/current/circuit-breaker.html#fielddata-circuit-breaker). @@ -114,7 +114,7 @@ The following circuit breaker settings are supported: The following indexing pressure settings are supported: `indexing_pressure.memory.limit` -: Configures [the indexing pressure settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-indexing-pressure.html#indexing-pressure-settings). +: Configures [the indexing pressure settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/indexing-pressure-settings.md#indexing-pressure-settings). ### X-Pack [ec_x_pack] @@ -128,28 +128,28 @@ The following indexing pressure settings are supported: #### All supported versions [ec_all_supported_versions] `xpack.ml.inference_model.time_to_live` -: Sets the duration of time that the trained models are cached. Check [{{ml-cap}} settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html). +: Sets the duration of time that the trained models are cached. Check [{{ml-cap}} settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md). `xpack.security.loginAssistanceMessage` : Adds a message to the login screen. Useful for displaying corporate messages. 
`xpack.security.authc.anonymous.*` -: To learn more on how to enable anonymous access, check [Enabling anonymous access](https://www.elastic.co/guide/en/elasticsearch/reference/current/anonymous-access.html) +: To learn more about how to enable anonymous access, check [Enabling anonymous access](/deploy-manage/users-roles/cluster-or-deployment-auth/anonymous-access.md). `xpack.notification.slack` -: Configures [Slack notification settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/actions-slack.html#actions-slack). Note that you need to add `secure_url` as a [secret value to the keystore](../../../deploy-manage/security/secure-settings.md). +: Configures [Slack notification settings](/explore-analyze/alerts-cases/watcher/actions-slack.md). Note that you need to add `secure_url` as a [secret value to the keystore](../../../deploy-manage/security/secure-settings.md). `xpack.notification.pagerduty` -: Configures [PagerDuty notification settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/actions-pagerduty.html#configuring-pagerduty). +: Configures [PagerDuty notification settings](/explore-analyze/alerts-cases/watcher/actions-pagerduty.md#configuring-pagerduty). `xpack.watcher.trigger.schedule.engine` -: Defines when the watch should start, based on date and time [Learn more](https://www.elastic.co/guide/en/elasticsearch/reference/current/trigger-schedule.html). +: Defines when the watch should start, based on date and time. [Learn more](/explore-analyze/alerts-cases/watcher/trigger-schedule.md). `xpack.notification.email.html.sanitization.*` -: Enables [email notification settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html) to sanitize HTML elements in emails that are sent. +: Enables [email notification settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/watcher-settings.md) to sanitize HTML elements in emails that are sent. `xpack.monitoring.collection.interval` -: Controls [how often data samples are collected](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#monitoring-collection-settings). +: Controls [how often data samples are collected](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md#monitoring-collection-settings). `xpack.monitoring.collection.min_interval_seconds` : Specifies the minimum number of seconds that a time bucket in a chart can represent. If you modify the `xpack.monitoring.collection.interval`, use the same value in this setting. @@ -197,7 +197,7 @@ The following search settings are supported: The following disk-based allocation settings are supported: `cluster.routing.allocation.disk.threshold_enabled` -: Enable or disable [disk allocation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#disk-based-shard-allocation) decider and defaults to `true`. +: Enables or disables the [disk allocation](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/cluster-level-shard-allocation-routing-settings.md#disk-based-shard-allocation) decider. Defaults to `true`. `cluster.routing.allocation.disk.watermark.low` : Configures [disk-based shard allocation’s low watermark](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html#disk-based-shard-allocation).
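To make the shape of these user settings concrete, here is a hedged sketch that combines a few of the options described above in `elasticsearch.yml`; the origin, username, role, and limit values are illustrative placeholders rather than recommendations:

```yaml
http.cors.enabled: true                                  # required on clusters provisioned before January 25th 2019
http.cors.allow-origin: "https://app.example.com"        # hypothetical origin; allow only your own hosts
xpack.security.authc.anonymous.username: anonymous_user  # illustrative identity for anonymous requests
xpack.security.authc.anonymous.roles: viewer             # hypothetical role granted to anonymous requests
indices.breaker.total.limit: 70%                         # parent circuit breaker limit
cluster.routing.allocation.disk.threshold_enabled: true  # disk allocation decider (defaults to true)
```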
diff --git a/raw-migrated-files/cloud/cloud/ec-autoscaling.md b/raw-migrated-files/cloud/cloud/ec-autoscaling.md index 36728fe26b..98d8c2612a 100644 --- a/raw-migrated-files/cloud/cloud/ec-autoscaling.md +++ b/raw-migrated-files/cloud/cloud/ec-autoscaling.md @@ -57,7 +57,7 @@ When past behavior on a hot tier indicates that the influx of data can increase * Through ILM policies. For example, if a deployment has only hot nodes and autoscaling is enabled, it automatically creates warm or cold nodes, if an ILM policy is trying to move data from hot to warm or cold nodes. -On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-create-job). +On machine learning nodes, scaling is determined by an estimate of the memory and CPU requirements for the currently configured jobs and trained models. When a new machine learning job tries to start, it looks for a node with adequate native memory and CPU capacity. If one cannot be found, it stays in an `opening` state. If this waiting job exceeds the queueing limit set in the machine learning decider, a scale up is requested. Conversely, as machine learning jobs run, their memory and CPU usage might decrease or other running jobs might finish or close. In this case, if the duration of decreased resource usage exceeds the set value for `down_scale_delay`, a scale down is requested. Check [Machine learning decider](../../../deploy-manage/autoscaling/autoscaling-deciders.md) for more detail. To learn more about machine learning jobs in general, check [Create anomaly detection jobs](/explore-analyze/machine-learning/anomaly-detection/ml-ad-run-jobs.md#ml-ad-create-job). On a highly available deployment, autoscaling events are always applied to instances in each availability zone simultaneously, to ensure consistency. diff --git a/raw-migrated-files/cloud/cloud/ec-cloud-ingest-data.md b/raw-migrated-files/cloud/cloud/ec-cloud-ingest-data.md index 1ceecbe26f..9d6e25c21e 100644 --- a/raw-migrated-files/cloud/cloud/ec-cloud-ingest-data.md +++ b/raw-migrated-files/cloud/cloud/ec-cloud-ingest-data.md @@ -5,7 +5,7 @@ You have a number of options for getting data into Elasticsearch, referred to as $$$ec-ingest-methods$$$ General content -: Index content like HTML pages, catalogs and other files. Send data directly to Elasticseach from your application using an Elastic language client. Otherwise use Elastic content [connectors](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-connectors.html) or the Elastic [web crawler](https://github.com/elastic/crawler). +: Index content like HTML pages, catalogs and other files. 
Send data directly to Elasticsearch from your application using an Elastic language client. Otherwise, use Elastic content [connectors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/search-connectors/index.md) or the Elastic [web crawler](https://github.com/elastic/crawler). Timestamped data : The preferred way to index timestamped data is to use Elastic Agent. Elastic Agent is a single, unified way to add monitoring for logs, metrics, and other types of data to a host. It can also protect hosts from security threats, query data from operating systems, and forward data from remote services or hardware. Each Elastic Agent-based integration includes default ingestion rules, dashboards, and visualizations to start analyzing your data right away. Fleet Management enables you to centrally manage all of your deployed Elastic Agents from Kibana. @@ -143,16 +143,16 @@ One reason for preprocessing your data is to control the structure of the data t ### Data integrity [ec-data-integrity] -Logstash boosts data resiliency for important data that you don’t want to lose. Logstash offers an on-disk [persistent queue (PQ)](https://www.elastic.co/guide/en/logstash/current/persistent-queues.html) that absorbs bursts of events without an external buffering mechanism. It attempts to deliver messages stored in the PQ until delivery succeeds at least once. +Logstash boosts data resiliency for important data that you don’t want to lose. Logstash offers an on-disk [persistent queue (PQ)](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/persistent-queues.md) that absorbs bursts of events without an external buffering mechanism. It attempts to deliver messages stored in the PQ until delivery succeeds at least once. -The Logstash [dead letter queue (DLQ)](https://www.elastic.co/guide/en/logstash/current/dead-letter-queues.html) provides on-disk storage for events that Logstash can’t process, giving you a chance to evaluate them. You can use the dead_letter_queue input plugin to easily reprocess DLQ events. +The Logstash [dead letter queue (DLQ)](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/dead-letter-queues.md) provides on-disk storage for events that Logstash can’t process, giving you a chance to evaluate them. You can use the dead_letter_queue input plugin to easily reprocess DLQ events. ### Data flow [ec-data-flow] If you need to collect data from multiple Beats or Elastic Agents, consider using Logstash as a proxy. Logstash can receive data from multiple endpoints, even on different networks, and send the data on to Elasticsearch through a single firewall rule. You get more security for less work than if you set up individual rules for each endpoint. -Logstash can send to multiple [outputs](https://www.elastic.co/guide/en/logstash/current/output-plugins.html) from a single pipeline to help you get the most value from your data. +Logstash can send to multiple [outputs](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/output-plugins.md) from a single pipeline to help you get the most value from your data.
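Both queues are turned on through `logstash.yml`. A minimal sketch, assuming illustrative paths and sizes; note that the DLQ is written to by the {{es}} output plugin, so it only captures events that fail on that output:

```yaml
queue.type: persisted                             # replace the default in-memory queue with the on-disk PQ
queue.max_bytes: 4gb                              # illustrative cap on PQ disk usage
dead_letter_queue.enable: true                    # retain unprocessable events for later review
path.dead_letter_queue: "/var/lib/logstash/dlq"   # illustrative DLQ location
```

Events that land in the DLQ can then be replayed with the `dead_letter_queue` input plugin mentioned above.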
## Where to go from here [ec-data-ingest-where-to-go] @@ -162,11 +162,11 @@ We have guides and many hands-on tutorials to help get you started with ingestin ### Ingest data for Elastic solutions [ec-ingest-solutions] -[Get started with Elastic Observability](https://www.elastic.co/guide/en/observability/current/observability-get-started.html) -: Use Elastic Observability to gain deeper insight into the behavior of your applications and systems. Follow our guides to ingest various data types, such as [logs and metrics](https://www.elastic.co/guide/en/observability/current/logs-metrics-get-started.html), [traces and APM](https://www.elastic.co/guide/en/observability/current/apm-getting-started-apm-server.html), and [data from Splunk](https://www.elastic.co/guide/en/observability/current/splunk-get-started.html). There are also several [tutorials](https://www.elastic.co/guide/en/observability/current/observability-tutorials.html) to choose from. +[Get started with Elastic Observability](/solutions/observability/get-started.md) +: Use Elastic Observability to gain deeper insight into the behavior of your applications and systems. Follow our guides to ingest various data types, such as [logs and metrics](/solutions/observability/infra-and-hosts/get-started-with-system-metrics.md), [traces and APM](/solutions/observability/apps/get-started-with-apm.md), and [data from Splunk](/solutions/observability/get-started/add-data-from-splunk.md). There are also several [tutorials](https://www.elastic.co/guide/en/observability/current/observability-tutorials.html) to choose from. -[Add data to Elastic Security](https://www.elastic.co/guide/en/security/current/ingest-data.html) -: Use Elastic Security to quickly detect, investigate, and respond to threats and vulnerabilities across your environment. You can use {{agent}} to ingest data into the [{{elastic-defend}} integration](https://www.elastic.co/guide/en/security/current/install-endpoint.html), or with many other [{{integrations}}](https://docs.elastic.co/en/integrations) that work together with {{elastic-sec}}. You can also [ingest data from Splunk](https://www.elastic.co/guide/en/observability/current/splunk-get-started.html) or from various third party collectors that ship [ECS compliant security data](https://www.elastic.co/guide/en/security/current/siem-field-reference.html). +[Add data to Elastic Security](/solutions/security/get-started/ingest-data-to-elastic-security.md) +: Use Elastic Security to quickly detect, investigate, and respond to threats and vulnerabilities across your environment. You can use {{agent}} to ingest data into the [{{elastic-defend}} integration](https://www.elastic.co/guide/en/security/current/install-endpoint.html), or with many other [{{integrations}}](https://docs.elastic.co/en/integrations) that work together with {{elastic-sec}}. You can also [ingest data from Splunk](https://www.elastic.co/guide/en/observability/current/splunk-get-started.html) or from various third party collectors that ship [ECS compliant security data](asciidocalypse://docs/docs-content/docs/reference/security/fields-and-object-schemas/siem-field-reference.md). ### Ingest data with Elastic Agent, Beats, and Logstash [ec-ingest-timestamped] @@ -179,10 +179,10 @@ For users who want to build their own solution, we can help you get started inge [Beats and Elastic Agent comparison](../../../manage-data/ingest/tools.md) : {{beats}} and {{agent}} can both send data to {{es}} either directly or via {{ls}}. 
You can use this guide to determine which of these primary ingest tools best matches your use case. -[Introduction to Fleet management](https://www.elastic.co/guide/en/fleet/current/fleet-overview.html) +[Introduction to Fleet management](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/index.md) : {{fleet}} provides a web-based UI in Kibana for centrally managing Elastic Agents and their policies. -[{{ls}} introduction](https://www.elastic.co/guide/en/logstash/current/introduction.html) +[{{ls}} introduction](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md) : Use {{ls}} to dynamically unify data from disparate sources and normalize the data into destinations of your choice. @@ -206,13 +206,13 @@ For users who want to build their own solution, we can help you get started inge ### Manipulate and pre-process your data [ec-ingest-manipulate] -[Ingest pipelines](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html) +[Ingest pipelines](/manage-data/ingest/transform-enrich/ingest-pipelines.md) : {{es}} ingest pipelines let you perform common transformations on your data before indexing. -[{{agent}} processors](https://www.elastic.co/guide/en/fleet/current/elastic-agent-processor-configuration.html) +[{{agent}} processors](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/agent-processors.md) : Use the {{agent}} lightweight processors to parse, filter, transform, and enrich data at the source. -[Creating a {{ls}} pipeline](https://www.elastic.co/guide/en/logstash/current/configuration.html) +[Creating a {{ls}} pipeline](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/creating-logstash-pipeline.md) : Create a {{ls}} pipeline by stringing together plugins—inputs, outputs, filters, and sometimes codecs—in order to process your data during ingestion. diff --git a/raw-migrated-files/cloud/cloud/ec-custom-bundles.md b/raw-migrated-files/cloud/cloud/ec-custom-bundles.md index 609f7f863c..de5c18b938 100644 --- a/raw-migrated-files/cloud/cloud/ec-custom-bundles.md +++ b/raw-migrated-files/cloud/cloud/ec-custom-bundles.md @@ -76,7 +76,7 @@ Bundles The dictionary `synonyms.txt` can be referenced as `synonyms.txt` or by the full path `/app/config/synonyms.txt` in the `synonyms_path` of the `synonym-filter`. - To learn more about analyzing with synonyms, check [Synonym token filter](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-tokenfilter.html) and [Formatting Synonyms](https://www.elastic.co/guide/en/elasticsearch/guide/2.x/synonym-formats.html). + To learn more about analyzing with synonyms, check [Synonym token filter](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/text-analysis/analysis-synonym-tokenfilter.md) and [Formatting Synonyms](https://www.elastic.co/guide/en/elasticsearch/guide/2.x/synonym-formats.html).
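As a concrete illustration of that bundle’s contents, a `synonyms.txt` dictionary uses the Solr synonym format; the entries below are invented examples rather than part of any shipped bundle:

```txt
# Equivalent terms: a search for any of these matches all of them
laptop, notebook, portable computer

# Explicit mapping: terms on the left are replaced by the term on the right
i-pod, i pod => ipod
```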
**GeoIP database bundle** diff --git a/raw-migrated-files/cloud/cloud/ec-enable-logging-and-monitoring.md b/raw-migrated-files/cloud/cloud/ec-enable-logging-and-monitoring.md index 8ce1480bfc..7957951bab 100644 --- a/raw-migrated-files/cloud/cloud/ec-enable-logging-and-monitoring.md +++ b/raw-migrated-files/cloud/cloud/ec-enable-logging-and-monitoring.md @@ -173,7 +173,7 @@ When shipping logs to a monitoring deployment there are more logging features av #### For {{es}}: [ec-extra-logging-features-elasticsearch] * [Audit logging](../../../deploy-manage/monitor/logging-configuration/enabling-audit-logs.md) - logs security-related events on your deployment -* [Slow query and index logging](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html) - helps find and debug slow queries and indexing +* [Slow query and index logging](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/slow-log-settings.md) - helps find and debug slow queries and indexing * Verbose logging - helps debug stack issues by increasing component logs After you’ve enabled log delivery on your deployment, you can [add the Elasticsearch user settings](../../../deploy-manage/deploy/elastic-cloud/edit-stack-settings.md) to enable these features. diff --git a/raw-migrated-files/cloud/cloud/ec-faq-getting-started.md b/raw-migrated-files/cloud/cloud/ec-faq-getting-started.md index 84cd31f781..b6dbed2570 100644 --- a/raw-migrated-files/cloud/cloud/ec-faq-getting-started.md +++ b/raw-migrated-files/cloud/cloud/ec-faq-getting-started.md @@ -37,7 +37,7 @@ This frequently-asked-questions list helps you with common questions while you g : Yes, all subscription levels for Elasticsearch Service include support, handled by email or through the Elastic Support Portal. Different subscription levels include different levels of support. For the Standard subscription level, there is no service-level agreement (SLA) on support response times. Gold and Platinum subscription levels include an SLA on response times to tickets and dedicated resources. To learn more, check [Getting Help](../../../troubleshoot/index.md). $$$faq-where$$$Where is Elasticsearch Service hosted? - : We host our {{es}} clusters on Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure. Check out which [regions we support](https://www.elastic.co/guide/en/cloud/current/ec-reference-regions.html) and what [hardware we use](https://www.elastic.co/guide/en/cloud/current/ec-reference-hardware.html). New data centers are added all the time. + : We host our {{es}} clusters on Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure. Check out which [regions we support](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/regions.md) and what [hardware we use](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/hardware.md). New data centers are added all the time. $$$faq-vs-aws$$$What is the difference between Elasticsearch Service and the Amazon {{es}} Service? : Elasticsearch Service is the only hosted and managed {{es}} service built, managed, and supported by the company behind {{es}}, {{kib}}, {{beats}}, and {{ls}}. With Elasticsearch Service, you always get the latest versions of the software. Our service is built on best practices and years of experience hosting and managing thousands of {{es}} clusters in the Cloud and on premise. 
For more information, check the following Amazon and Elastic {{es}} Service [comparison page](https://www.elastic.co/aws-elasticsearch-service). diff --git a/raw-migrated-files/cloud/cloud/ec-getting-started-node-js.md b/raw-migrated-files/cloud/cloud/ec-getting-started-node-js.md index 7f0d0eb304..0b9c39acb2 100644 --- a/raw-migrated-files/cloud/cloud/ec-getting-started-node-js.md +++ b/raw-migrated-files/cloud/cloud/ec-getting-started-node-js.md @@ -150,7 +150,7 @@ async function run() { run().catch(console.log) ``` -When using the [client.index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html#_index) API, the request automatically creates the `game-of-thrones` index if it doesn’t already exist, as well as document IDs for each indexed document if they are not explicitly specified. +When using the [client.index](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/api-reference.md#_index) API, the request automatically creates the `game-of-thrones` index if it doesn’t already exist, as well as document IDs for each indexed document if they are not explicitly specified. ## Search and modify data [ec_search_and_modify_data] @@ -197,7 +197,7 @@ async function update() { update().catch(console.log) ``` -This [more comprehensive list of API examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html) includes bulk operations, checking the existence of documents, updating by query, deleting, scrolling, and SQL queries. To learn more, check the complete [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html). +This [more comprehensive list of API examples](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/examples.md) includes bulk operations, checking the existence of documents, updating by query, deleting, scrolling, and SQL queries. To learn more, check the complete [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html). ## Switch to API key authentication [ec_switch_to_api_key_authentication] @@ -284,11 +284,11 @@ Security Connections -: If your application connecting to Elasticsearch Service runs under the Java security manager, you should at least disable the caching of positive hostname resolutions. To learn more, check the [Java API Client documentation](https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current/_others.html). +: If your application connecting to Elasticsearch Service runs under the Java security manager, you should at least disable the caching of positive hostname resolutions. To learn more, check the [Java API Client documentation](asciidocalypse://docs/elasticsearch-java/docs/reference/elasticsearch/elasticsearch-client-java-api-client/_others.md). Schema : When the example code was run, an index mapping was created automatically. The field types were selected by {{es}} based on the content seen when the first record was ingested, and updated as new fields appeared in the data. It would be more efficient to specify the fields and field types in advance to optimize performance. Refer to the Elastic Common Schema documentation and Field Type documentation when you are designing the schema for your production use cases.
Ingest -: For more advanced scenarios, this [bulk ingestion](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/bulk_examples.html) reference gives an example of the `bulk` API that makes it possible to perform multiple operations in a single call. This bulk example also explicitly specifies document IDs. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. +: For more advanced scenarios, this [bulk ingestion](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/bulk_examples.md) reference gives an example of the `bulk` API that makes it possible to perform multiple operations in a single call. This bulk example also explicitly specifies document IDs. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. diff --git a/raw-migrated-files/cloud/cloud/ec-getting-started-python.md b/raw-migrated-files/cloud/cloud/ec-getting-started-python.md index 616f2830ce..56ac880841 100644 --- a/raw-migrated-files/cloud/cloud/ec-getting-started-python.md +++ b/raw-migrated-files/cloud/cloud/ec-getting-started-python.md @@ -275,7 +275,7 @@ es.get(index='lord-of-the-rings', id='2EkAzngB_pyHD3p65UMt') 'birthplace': 'The Shire'}} ``` -For frequently used API calls with the Python client, check [Examples](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/examples.html). +For frequently used API calls with the Python client, check [Examples](asciidocalypse://docs/elasticsearch-py/docs/reference/elasticsearch/elasticsearch-client-python-api/examples.md). ## Switch to API key authentication [ec_switch_to_api_key_authentication_2] @@ -350,5 +350,5 @@ Schema : When the example code is run, an index mapping is created automatically. The field types are selected by {{es}} based on the content seen when the first record was ingested, and updated as new fields appeared in the data. It would be more efficient to specify the fields and field types in advance to optimize performance. Refer to the Elastic Common Schema documentation and Field Type documentation when you design the schema for your production use cases. Ingest -: For more advanced scenarios, [Bulk helpers](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/client-helpers.html#bulk-helpers) gives examples for the `bulk` API that makes it possible to perform multiple operations in a single call. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. +: For more advanced scenarios, [Bulk helpers](asciidocalypse://docs/elasticsearch-py/docs/reference/elasticsearch/elasticsearch-client-python-api/client-helpers.md#bulk-helpers) gives examples for the `bulk` API that makes it possible to perform multiple operations in a single call. If you have a lot of documents to index, using bulk to batch document operations is significantly faster than submitting requests individually. 
diff --git a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-beats-logstash.md b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-beats-logstash.md index cdbde11cd6..69489e3b75 100644 --- a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-beats-logstash.md +++ b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-beats-logstash.md @@ -65,7 +65,7 @@ If you have multiple servers with metrics data, repeat the following steps to co **About Metricbeat modules** -Metricbeat has [many modules](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html) available that collect common metrics. You can [configure additional modules](https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-metricbeat.html) as needed. For this example we’re using Metricbeat’s default configuration, which has the [System module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-system.html) enabled. The System module allows you to monitor servers with the default set of metrics: *cpu*, *load*, *memory*, *network*, *process*, *process_summary*, *socket_summary*, *filesystem*, *fsstat*, and *uptime*. +Metricbeat has [many modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-modules.md) available that collect common metrics. You can [configure additional modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/configuration-metricbeat.md) as needed. For this example, we’re using Metricbeat’s default configuration, which has the [System module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/metricbeat-module-system.md) enabled. The System module allows you to monitor servers with the default set of metrics: *cpu*, *load*, *memory*, *network*, *process*, *process_summary*, *socket_summary*, *filesystem*, *fsstat*, and *uptime*. **Load the Metricbeat Kibana dashboards** @@ -87,7 +87,7 @@ sudo ./metricbeat setup \ 1. Specify the Cloud ID of your Elasticsearch Service deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details. 2. Specify the username and password provided to you when creating the deployment. Make sure to keep the colon between ** and **.::::{important} -Depending on variables including the installation location, environment and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of the metricbeat.yml. +Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of metricbeat.yml. You might encounter similar permissions hurdles as you work through multiple sections of this document. These permission requirements are there for a good reason: a security safeguard to prevent unauthorized access and modification of key Elastic files. @@ -136,7 +136,7 @@ The next step is to configure Filebeat to send operational data to Logstash. As **Enable the Filebeat system module** -Filebeat has [many modules](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-modules.html) available that collect common log types.
You can [configure additional modules](https://www.elastic.co/guide/en/beats/filebeat/current/configuration-filebeat-modules.html) as needed. For this example we’re using Filebeat’s [System module](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-module-system.html). This module reads in the various system log files (with information including login successes or failures, sudo command usage, and other key usage details) based on the detected operating system. For this example, a Linux-based OS is used and Filebeat ingests logs from the */var/log/* folder. It’s important to verify that Filebeat is given permission to access your logs folder through standard file and folder permissions. +Filebeat has [many modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-modules.md) available that collect common log types. You can [configure additional modules](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/configuration-filebeat-modules.md) as needed. For this example we’re using Filebeat’s [System module](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-module-system.md). This module reads in the various system log files (with information including login successes or failures, sudo command usage, and other key usage details) based on the detected operating system. For this example, a Linux-based OS is used and Filebeat ingests logs from the */var/log/* folder. It’s important to verify that Filebeat is given permission to access your logs folder through standard file and folder permissions. 1. Go to */filebeat-/modules.d/* where ** is the directory where Filebeat is installed. 2. Filebeat requires at least one fileset to be enabled. In file */filebeat-/modules.d/system.yml.disabled*, under both `syslog` and `auth` set `enabled` to `true`: @@ -188,7 +188,7 @@ Index setup finished. Loading dashboards (Kibana must be running and reachable) Loaded dashboards Setting up ML using setup --machine-learning is going to be removed in 8.0.0. Please use the ML app instead. -See more: https://www.elastic.co/guide/en/machine-learning/current/index.html +See more: /explore-analyze/machine-learning.md Loaded machine learning job configurations Loaded Ingest pipelines ``` @@ -238,7 +238,7 @@ Now the Filebeat and Metricbeat are set up, let’s configure a {{ls}} pipeline 1. {{ls}} listens for Beats input on the default port of 5044. Only one line is needed to do this. {{ls}} can handle input from many Beats of the same and also of varying types (Metricbeat, Filebeat, and others). 2. This sends output to the standard output, which displays through your command line interface. This plugin enables you to verify the data before you send it to {{es}}, in a later step. -3. Save the new *beats.conf* file in your Logstash folder. To learn more about the file format and options, check [{{ls}} Configuration Examples](https://www.elastic.co/guide/en/logstash/current/config-examples.html). +3. Save the new *beats.conf* file in your Logstash folder. To learn more about the file format and options, check [{{ls}} Configuration Examples](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/config-examples.md). ## Output {{ls}} data to stdout [ec-beats-logstash-stdout] @@ -380,7 +380,7 @@ In this section, you configure {{ls}} to send the Metricbeat and Filebeat data t ``` 1. Use the Cloud ID of your Elasticsearch Service deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. 
Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details. - 2. the default usename is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) for information on the writer role and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) documentation. + 2. The default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check the [Grant access to secured resources](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/feature-roles.md) for information on the writer role and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Grant access to secured resources](https://www.elastic.co/guide/en/beats/filebeat/current/feature-roles.html) documentation. Following are some additional details about the configuration file settings: @@ -472,9 +472,9 @@ In this section, you configure {{ls}} to send the Metricbeat and Filebeat data t ::::{note} In this guide, you manually launch each of the Elastic stack applications through the command line interface. In production, you may prefer to configure {{ls}}, Metricbeat, and Filebeat to run as System Services.
Check the following pages for the steps to configure each application to run as a service: -* [Running {{ls}} as a service on Debian or RPM](https://www.elastic.co/guide/en/logstash/current/running-logstash.html) -* [Metricbeat and systemd](https://www.elastic.co/guide/en/beats/metricbeat/current/running-with-systemd.html) -* [Start filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-starting.html) +* [Running {{ls}} as a service on Debian or RPM](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/running-logstash.md) +* [Metricbeat and systemd](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-metricbeat/running-with-systemd.md) +* [Start filebeat](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-starting.md) :::: diff --git a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-db-logstash.md b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-db-logstash.md index a2810c9ee1..33e36f5ebd 100644 --- a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-db-logstash.md +++ b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-db-logstash.md @@ -1,6 +1,6 @@ # Ingest data from a relational database into Elasticsearch Service [ec-getting-started-search-use-cases-db-logstash] -This guide explains how to ingest data from a relational database into Elasticsearch Service through [Logstash](https://www.elastic.co/guide/en/logstash/current/introduction.html), using the Logstash [JDBC input plugin](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html). It demonstrates how Logstash can be used to efficiently copy records and to receive updates from a relational database, and then send them into {{es}} in an Elasticsearch Service deployment. +This guide explains how to ingest data from a relational database into Elasticsearch Service through [Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md), using the Logstash [JDBC input plugin](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/plugins-inputs-jdbc.md). It demonstrates how Logstash can be used to efficiently copy records and to receive updates from a relational database, and then send them into {{es}} in an Elasticsearch Service deployment. The code and methods presented here have been tested with MySQL. They should work with other relational databases. @@ -288,7 +288,7 @@ In this section, we configure Logstash to send the MySQL data to Elasticsearch. ``` 1. Use the Cloud ID of your Elasticsearch Service deployment. You can include or omit the `:` prefix at the beginning of the Cloud ID. Both versions work fine. Find your Cloud ID by going to the {{kib}} main menu and selecting Management > Integrations, and then selecting View deployment details. - 2. the default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) for information on roles and API Keys. 
Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) documentation. + 2. The default username is `elastic`. It is not recommended to use the `elastic` account for ingesting data as this is a superuser. We recommend using a user with reduced permissions, or an API Key with permissions specific to the indices or data streams that will be written to. Check [Configuring security in Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/secure-connection.md) for information on roles and API Keys. Use the password provided when you created the deployment if using the `elastic` user, or the password used when creating a new ingest user with the roles specified in the [Configuring security in Logstash](https://www.elastic.co/guide/en/logstash/current/ls-security.html) documentation. Following are some additional details about the configuration file settings: diff --git a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-node-logs.md b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-node-logs.md index ab84b966b7..5c05c404d3 100644 --- a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-node-logs.md +++ b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-node-logs.md @@ -1,6 +1,6 @@ # Ingest logs from a Node.js web application using Filebeat [ec-getting-started-search-use-cases-node-logs] -This guide demonstrates how to ingest logs from a Node.js web application and deliver them securely into an Elasticsearch Service deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in Kibana as requests are made to the Node.js server. While Node.js is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html#_get_started). +This guide demonstrates how to ingest logs from a Node.js web application and deliver them securely into an Elasticsearch Service deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in Kibana as requests are made to the Node.js server. While Node.js is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](asciidocalypse://docs/ecs-logging/docs/reference/ecs/ecs-logging-overview/intro.md#_get_started). This guide presents: @@ -33,7 +33,7 @@ For the three following packages, you can create a working directory to install npm install winston ``` -* The [Elastic Common Schema (ECS) formatter](https://www.elastic.co/guide/en/ecs-logging/nodejs/current/winston.html) for the Node.js winston logger - This plugin formats your Node.js logs into an ECS structured JSON format ideally suited for ingestion into Elasticsearch.
To install the ECS winston logger, run the following command in your working directory so that the package is installed in the same location as the winston package: +* The [Elastic Common Schema (ECS) formatter](asciidocalypse://docs/ecs-logging-nodejs/docs/reference/ecs/ecs-logging-nodejs/winston.md) for the Node.js winston logger - This plugin formats your Node.js logs into an ECS structured JSON format ideally suited for ingestion into Elasticsearch. To install the ECS winston logger, run the following command in your working directory so that the package is installed in the same location as the winston package: ```sh npm install @elastic/ecs-winston-format @@ -297,7 +297,7 @@ For this example, Filebeat uses the following four decoding options. json.expand_keys: true ``` -To learn more about these settings, check [JSON input configuration options](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-log.html#filebeat-input-log-config-json) and [Decode JSON fields](https://www.elastic.co/guide/en/beats/filebeat/current/decode-json-fields.html) in the Filebeat Reference. +To learn more about these settings, check [JSON input configuration options](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-input-log.md#filebeat-input-log-config-json) and [Decode JSON fields](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/decode-json-fields.md) in the Filebeat Reference. Append the four JSON decoding options to the *Filebeat inputs* section of *filebeat.yml*, so that the section now looks like this: @@ -333,7 +333,7 @@ Filebeat comes with predefined assets for parsing, indexing, and visualizing you ``` ::::{important} -Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. +Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-libbeat/config-file-permissions.md) of filebeat.yml. You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. :::: @@ -434,7 +434,7 @@ In this command: * The *-c* flag specifies the path to the Filebeat config file. ::::{note} -Just in case the command doesn’t work as expected, check the [Filebeat quick start](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. +Just in case the command doesn’t work as expected, check the [Filebeat quick start](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. :::: @@ -517,5 +517,5 @@ You can add titles to the visualizations, resize and position them as you like, 2. 
As your final step, remember to stop Filebeat, the Node.js web server, and the client. Enter *CTRL + C* in the terminal window for each application to stop them. -You now know how to monitor log files from a Node.js web application, deliver the log event data securely into an Elasticsearch Service deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about working in Elasticsearch Service. +You now know how to monitor log files from a Node.js web application, deliver the log event data securely into an Elasticsearch Service deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-overview.md) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about working in Elasticsearch Service. diff --git a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-python-logs.md b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-python-logs.md index 847ee7f13b..a796936080 100644 --- a/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-python-logs.md +++ b/raw-migrated-files/cloud/cloud/ec-getting-started-search-use-cases-python-logs.md @@ -1,6 +1,6 @@ # Ingest logs from a Python application using Filebeat [ec-getting-started-search-use-cases-python-logs] -This guide demonstrates how to ingest logs from a Python application and deliver them securely into an Elasticsearch Service deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in {{kib}} as they occur. While Python is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html). +This guide demonstrates how to ingest logs from a Python application and deliver them securely into an Elasticsearch Service deployment. You’ll set up Filebeat to monitor a JSON-structured log file that has standard Elastic Common Schema (ECS) formatted fields, and you’ll then view real-time visualizations of the log events in {{kib}} as they occur. While Python is used for this example, this approach to monitoring log output is applicable across many client types. Check the list of [available ECS logging plugins](asciidocalypse://docs/ecs-logging/docs/reference/ecs/ecs-logging-overview/intro.md). You are going to learn how to: @@ -14,7 +14,7 @@ You are going to learn how to: ## Prerequisites [ec_prerequisites_2] -To complete these steps you need to have [Python](https://www.python.org/) installed on your system as well as the [Elastic Common Schema (ECS) logger](https://www.elastic.co/guide/en/ecs-logging/python/current/installation.html) for the Python logging library. 
+To complete these steps, you need to have [Python](https://www.python.org/) installed on your system as well as the [Elastic Common Schema (ECS) logger](asciidocalypse://docs/ecs-logging-python/docs/reference/ecs/ecs-logging-python/installation.md) for the Python logging library. To install *ecs-logging-python*, run: @@ -102,7 +102,7 @@ In this step, you’ll create a Python script that generates logs in JSON format Having your logs written in a JSON format with ECS fields allows for easy parsing and analysis, and for standardization with other applications. A standard, easily parsable format becomes increasingly important as the volume and type of data captured in your logs expands over time. - Together with the standard fields included for each log entry is an extra *http.request.body.content* field. This extra field is there just to give you some additional, interesting data to work with, and also to demonstrate how you can add optional fields to your log data. Check the [ECS Field Reference](https://www.elastic.co/guide/en/ecs/current/ecs-field-reference.html) for the full list of available fields. + Together with the standard fields included for each log entry is an extra *http.request.body.content* field. This extra field is there just to give you some additional, interesting data to work with, and also to demonstrate how you can add optional fields to your log data. Check the [ECS Field Reference](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-field-reference.md) for the full list of available fields. 2. Let’s give the Python script a test run. Open a terminal instance in the location where you saved *elvis.py* and run the following: @@ -188,7 +188,7 @@ For this example, Filebeat uses the following four decoding options. json.expand_keys: true ``` -To learn more about these settings, check [JSON input configuration options](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-log.html#filebeat-input-log-config-json) and [Decode JSON fields](https://www.elastic.co/guide/en/beats/filebeat/current/decode-json-fields.html) in the Filebeat Reference. +To learn more about these settings, check [JSON input configuration options](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-input-log.md#filebeat-input-log-config-json) and [Decode JSON fields](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/decode-json-fields.md) in the Filebeat Reference. Append the four JSON decoding options to the *Filebeat inputs* section of *filebeat.yml*, so that the section now looks like this: @@ -224,7 +224,7 @@ Filebeat comes with predefined assets for parsing, indexing, and visualizing you ``` ::::{important} -Depending on variables including the installation location, environment, and local permissions, you might need to [change the ownership](https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html) of filebeat.yml.
You can also try running the command as *root*: *sudo ./filebeat setup -e* or you can disable strict permission checks by running the command with the `--strict.perms=false` option. :::: @@ -330,7 +330,7 @@ In this command: * The *-c* flag specifies the path to the Filebeat config file. ::::{note} -Just in case the command doesn’t work as expected, check the [Filebeat quick start](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. +Just in case the command doesn’t work as expected, check the [Filebeat quick start](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-installation-configuration.md#start) for the detailed command syntax for your operating system. You can also try running the command as *root*: *sudo ./filebeat -e -c filebeat.yml*. :::: @@ -408,5 +408,5 @@ You can add titles to the visualizations, resize and position them as you like, 2. As your final step, remember to stop Filebeat and the Python script. Enter *CTRL + C* in both your Filebeat terminal and in your `elvis.py` terminal. -You now know how to monitor log files from a Python application, deliver the log event data securely into an Elasticsearch Service deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about working in Elasticsearch Service. +You now know how to monitor log files from a Python application, deliver the log event data securely into an Elasticsearch Service deployment, and then visualize the results in Kibana in real time. Consult the [Filebeat documentation](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/filebeat-overview.md) to learn more about the ingestion and processing options available for your data. You can also explore our [documentation](../../../manage-data/ingest.md) to learn all about working in Elasticsearch Service. diff --git a/raw-migrated-files/cloud/cloud/ec-maintenance-mode-routing.md b/raw-migrated-files/cloud/cloud/ec-maintenance-mode-routing.md index 4d58a2d3d2..7704331a32 100644 --- a/raw-migrated-files/cloud/cloud/ec-maintenance-mode-routing.md +++ b/raw-migrated-files/cloud/cloud/ec-maintenance-mode-routing.md @@ -7,7 +7,7 @@ The {{ecloud}} proxy routes HTTP requests to its deployment’s individual produ It might be helpful to temporarily block upstream requests in order to protect some or all instances or products within your deployment. For example, you might stop request routing in the following cases: * If another team within your company starts streaming new data into your production {{integrations-server}} without previous load testing, both it and {{es}} might experience performance issues. You might consider stopping routing requests on all {{integrations-server}} instances in order to protect your downstream {{es}} instance. -* If {{es}} is being overwhelmed by upstream requests, it might experience increased response times or even become unresponsive. This might impact your ability to resize components in your deployment and increase the duration of pending plans or increase the chance of plan changes failing. 
Because every {{es}} node is an [implicit coordinating node](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html), you should stop routing requests across all {{es}} nodes to completely block upstream traffic.
+* If {{es}} is being overwhelmed by upstream requests, it might experience increased response times or even become unresponsive. This might impact your ability to resize components in your deployment, increase the duration of pending plans, or increase the chance of plan changes failing. Because every {{es}} node is an [implicit coordinating node](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/node-settings.md), you should stop routing requests across all {{es}} nodes to completely block upstream traffic.


## Considerations [ec_considerations]

diff --git a/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md b/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md
index 747973ad6f..c8b855e504 100644
--- a/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md
+++ b/raw-migrated-files/cloud/cloud/ec-manage-kibana-settings.md
@@ -43,16 +43,16 @@ If a setting is not supported by Elasticsearch Service, you will get an error me

### Version 8.9.0+ [ec_version_8_9_0]

`xpack.fleet.createArtifactsBulkBatchSize`
-: Allow to configure batch size for creating and updating Fleet user artifacts. Examples include creation of Trusted Applications and Endpoint Exceptions in Security. To learn more, check [Fleet settings in Kibana](https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html).
+: Allows you to configure the batch size for creating and updating Fleet user artifacts. Examples include creation of Trusted Applications and Endpoint Exceptions in Security. To learn more, check [Fleet settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/fleet-settings.md).

`xpack.securitySolution.maxUploadResponseActionFileBytes`
-: Allow to configure the max file upload size for use with the Upload File Repsonse action available with the Defend Integration. To learn more, check [Endpoint Response actions](https://www.elastic.co/guide/en/security/current/response-actions.html).
+: Allows you to configure the max file upload size for use with the Upload File Response action available with the Defend Integration. To learn more, check [Endpoint Response actions](/solutions/security/endpoint-response-actions.md).


### Version 8.7.0+ [ec_version_8_7_0]

`xpack.security.session.concurrentSessions.maxSessions`
-: Set the maximum number of sessions each user is allowed to have active in {{kib}}. By default, no limit is applied. If set, the value of this option should be an integer between 1 and 1000. When the limit is exceeded, the oldest session is automatically invalidated. To learn more, check [Session management](https://www.elastic.co/guide/en/kibana/current/xpack-security-session-management.html#session-max-sessions).
+: Set the maximum number of sessions each user is allowed to have active in {{kib}}. By default, no limit is applied. If set, the value of this option should be an integer between 1 and 1000. When the limit is exceeded, the oldest session is automatically invalidated. To learn more, check [Session management](/deploy-manage/security/kibana-session-management.md#session-max-sessions).
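For example, a minimal sketch of how this could look in the {{kib}} user settings (the limit value of `3` is illustrative):

```yaml
# Cap each user at three concurrent Kibana sessions; once the limit is
# exceeded, the oldest session is automatically invalidated.
xpack.security.session.concurrentSessions.maxSessions: 3
```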
`server.securityResponseHeaders.crossOriginOpenerPolicy`
: Controls whether the [`Cross-Origin-Opener-Policy`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cross-Origin-Opener-Policy) header is used in all responses to the client from the Kibana server. To learn more, see [Configure Kibana](https://www.elastic.co/guide/en/kibana/current/settings.html#server-securityResponseHeaders-crossOriginOpenerPolicy).

@@ -224,7 +224,7 @@ If a setting is not supported by Elasticsearch Service, you will get an error me

If you are using SAML to secure your clusters, these settings are supported in Elasticsearch Service.

-To learn more, refer to [configuring Kibana to use SAML](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-configure-kibana).
+To learn more, refer to [configuring Kibana to use SAML](/deploy-manage/users-roles/cluster-or-deployment-auth/saml.md#saml-configure-kibana).


#### Version 8.0.0+ [ec_version_8_0_0]

@@ -299,17 +299,17 @@ If you are using OpenID Connect to secure your clusters, these settings are supp

`xpack.security.authc.oidc.realm`
: Specifies which OpenID Connect realm in Elasticsearch should be used.

-To learn more, check [configuring Kibana to use OpenID Connect](https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html).
+To learn more, check [configuring Kibana to use OpenID Connect](/deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md).


### Anonymous authentication [ec_anonymous_authentication]

-If you want to allow anonymous authentication in Kibana, these settings are supported in Elasticsearch Service. To learn more on how to enable anonymous access, check [Enabling anonymous access](https://www.elastic.co/guide/en/elasticsearch/reference/current/anonymous-access.html) and [Configuring Kibana to use anonymous authentication](https://www.elastic.co/guide/en/kibana/current/kibana-authentication.html#anonymous-authentication).
+If you want to allow anonymous authentication in Kibana, these settings are supported in Elasticsearch Service. To learn how to enable anonymous access, check [Enabling anonymous access](/deploy-manage/users-roles/cluster-or-deployment-auth/anonymous-access.md) and [Configuring Kibana to use anonymous authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/user-authentication.md#anonymous-authentication).


#### Supported versions before 8.0.0 [ec_supported_versions_before_8_0_0]

`xpack.security.sessionTimeout`
-: Specifies the session duration in milliseconds. Allows a value between 15000 (15 seconds) and 86400000 (1 day). To learn more, check [Security settings in Kibana](https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html). Deprecated in versions 7.6+ and removed in versions 8.0+.
+: Specifies the session duration in milliseconds. Allows a value between 15000 (15 seconds) and 86400000 (1 day). To learn more, check [Security settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/security-settings.md). Deprecated in versions 7.6+ and removed in versions 8.0+.


#### All supported versions [ec_all_supported_versions_4]

@@ -474,7 +474,7 @@ This setting is not available in versions 8.0.0 through 8.2.0. As such, this set
: Sets the size of the ephemeral queue. Defaults to `10`.

`xpack.actions.customHostSettings`
-: An array of objects, one per host, containing the SSL/TLS settings used when executing connectors which make HTTPS and SMTP connections to the host servers.
For details about using this setting, check [Alerting and action settings in Kibana](https://www.elastic.co/guide/en/kibana/current/alert-action-settings-kb.html). +: An array of objects, one per host, containing the SSL/TLS settings used when executing connectors which make HTTPS and SMTP connections to the host servers. For details about using this setting, check [Alerting and action settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/alerting-settings.md). `xpack.actions.ssl.proxyVerificationMode` : Controls the verification of the proxy server certificate that hosted-ems receives when making an outbound SSL/TLS connection to the host server. Valid values are `full`, `certificate`, and `none`. Use `full` to perform hostname verification, `certificate` to skip hostname verification, and `none` to skip verification. Default: `full`. @@ -588,7 +588,7 @@ This setting is not available in versions 8.0.0 through 8.2.0. As such, this set : When enabled, specifies the email address to receive cluster alert notifications. `xpack.monitoring.kibana.collection.interval` -: Controls [how often data samples are collected](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#monitoring-collection-settings). +: Controls [how often data samples are collected](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/monitoring-settings.md#monitoring-collection-settings). `xpack.monitoring.min_interval_seconds` : Specifies the minimum number of seconds that a time bucket in a chart can represent. If you modify the `xpack.monitoring.kibana.collection.interval`, use the same value in this setting. @@ -599,7 +599,7 @@ This setting is not available in versions 8.0.0 through 8.2.0. As such, this set `xpack.ml.enabled` : Set to true (default) to enable machine learning. - If set to `false` in `kibana.yml`, the machine learning icon is hidden in this Kibana instance. If `xpack.ml.enabled` is set to `true` in `elasticsearch.yml`, however, you can still use the machine learning APIs. To disable machine learning entirely, check the [Elasticsearch Machine Learning Settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html). + If set to `false` in `kibana.yml`, the machine learning icon is hidden in this Kibana instance. If `xpack.ml.enabled` is set to `true` in `elasticsearch.yml`, however, you can still use the machine learning APIs. To disable machine learning entirely, check the [Elasticsearch Machine Learning Settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/machine-learning-settings.md). #### Content security policy configuration [ec_content_security_policy_configuration] @@ -692,7 +692,7 @@ Each method has its own unique limitations which are important to understand. `xpack.reporting.csv.scroll.duration` -: Amount of [time](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units) allowed before {{kib}} cleans the scroll context during a CSV export. Valid option is either `auto` or [time](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units), Defaults to `30s`. +: Amount of [time](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units) allowed before {{kib}} cleans the scroll context during a CSV export. 
Valid values are either `auto` or a [time](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/rest-apis/api-conventions.md#time-units) value. Defaults to `30s`.

::::{note}
Support for the `auto` option is included here: when the config value is set to `auto`, the scroll context is preserved for as long as possible before the report task is terminated due to the limits of `xpack.reporting.queue.timeout`.

@@ -929,7 +929,7 @@ The following APM settings are supported in Kibana:

`xpack.apm.ui.maxTraceItems`
: Maximum number of child items displayed when viewing trace details.

-    Defaults to `1000`. Any positive value is valid. To learn more, check [APM settings in Kibana](https://www.elastic.co/guide/en/kibana/current/apm-settings-kb.html).
+    Defaults to `1000`. Any positive value is valid. To learn more, check [APM settings in Kibana](asciidocalypse://docs/kibana/docs/reference/configuration-reference/apm-settings.md).

`xpack.apm.ui.enabled`

diff --git a/raw-migrated-files/cloud/cloud/ec-metrics-memory-pressure.md b/raw-migrated-files/cloud/cloud/ec-metrics-memory-pressure.md
index 1ae7ac3f60..38a3d326aa 100644
--- a/raw-migrated-files/cloud/cloud/ec-metrics-memory-pressure.md
+++ b/raw-migrated-files/cloud/cloud/ec-metrics-memory-pressure.md
@@ -35,15 +35,15 @@ By understanding and adjusting the way your data is indexed, retained, and searc

### Sharding strategy [ec_sharding_strategy]

-{{es}} indices are divided into shards. Understanding shards is important when tuning {{es}}. Check [Size your shards](https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html) in the {{es}} documentation to learn more.
+{{es}} indices are divided into shards. Understanding shards is important when tuning {{es}}. Check [Size your shards](/deploy-manage/production-guidance/optimize-performance/size-shards.md) in the {{es}} documentation to learn more.


### Data retention [ec_data_retention]

-The total amount of data being searched affects search performance. Check the tutorial [Automate rollover with index lifecycle management](https://www.elastic.co/guide/en/elasticsearch/reference/current/getting-started-index-lifecycle-management.html) (ILM) to automate data retention policies.
+The total amount of data being searched affects search performance. Check the tutorial [Automate rollover with index lifecycle management](/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md) (ILM) to automate data retention policies.


### Tune for search speed [ec_tune_for_search_speed]

-The documentation [Tune for search speed](https://www.elastic.co/guide/en/elasticsearch/reference/current/tune-for-search-speed.html) provides details on how to analyze queries, optimize field types, minimize the fields searched, and more.
+The documentation [Tune for search speed](/deploy-manage/production-guidance/optimize-performance/search-speed.md) provides details on how to analyze queries, optimize field types, minimize the fields searched, and more.

diff --git a/raw-migrated-files/cloud/cloud/ec-monitoring-setup.md b/raw-migrated-files/cloud/cloud/ec-monitoring-setup.md
index 2ffe411c2a..66bdee1492 100644
--- a/raw-migrated-files/cloud/cloud/ec-monitoring-setup.md
+++ b/raw-migrated-files/cloud/cloud/ec-monitoring-setup.md
@@ -27,7 +27,7 @@ After you have created a new deployment, you should enable shipping logs and met

5. Select **Save**.
-Optionally, turn on [audit logging](https://www.elastic.co/guide/en/elasticsearch/reference/current/auditing-settings.html) to capture security-related events, such as authentication failures, refused connections, and data-access events through the proxy. To turn on audit logging, [edit your deployment’s elasticsearch.yml file](../../../deploy-manage/deploy/elastic-cloud/edit-stack-settings.md) to add these lines:
+Optionally, turn on [audit logging](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/configuration-reference/auditing-settings.md) to capture security-related events, such as authentication failures, refused connections, and data-access events through the proxy. To turn on audit logging, [edit your deployment’s elasticsearch.yml file](../../../deploy-manage/deploy/elastic-cloud/edit-stack-settings.md) to add these lines:

```sh
xpack.security.audit.enabled: true
@@ -40,7 +40,7 @@ The last two lines are commented out for now but left there as placeholders to e

## View your deployment health [ec_view_your_deployment_health]

-From the monitoring deployment, you can now view your deployment’s health in Kibana using [Stack Monitoring](https://www.elastic.co/guide/en/kibana/current/xpack-monitoring.html):
+From the monitoring deployment, you can now view your deployment’s health in Kibana using [Stack Monitoring](/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md):

1. Select the **Kibana** link for your monitoring deployment.
2. From the app menu or the search bar, open **Stack Monitoring**.

::::

-To learn more about what [Elasticsearch monitoring metrics](https://www.elastic.co/guide/en/kibana/current/elasticsearch-metrics.html) are available, take a look at the different tabs. For example:
+To learn more about what [Elasticsearch monitoring metrics](/deploy-manage/monitor/monitoring-data/elasticsearch-metrics.md) are available, take a look at the different tabs. For example:

* The **Overview** tab includes information about the search and indexing performance of Elasticsearch and also provides log entries.
* The **Nodes** tab can help you monitor cluster CPU performance, JVM strain, and free disk space.

@@ -70,13 +70,13 @@ Some [performance metrics](../../../deploy-manage/monitor/monitoring-data/ec-saa

If you suspect a performance issue, you can use your monitoring deployment to investigate what is going on in Kibana:

-* Through **Observability** > **Logs** > **Stream**: This page shows errors in real-time and is part of the same logs Elastic Support reviews when a deployment experiences issues. Check [Tail log files](https://www.elastic.co/guide/en/observability/current/tail-logs.html).
-* Through **Discover**: This page is a good option for investigating widespread historical patterns. Check [Discover](https://www.elastic.co/guide/en/kibana/current/discover.html).
+* Through **Observability** > **Logs** > **Stream**: This page shows errors in real-time and is part of the same logs Elastic Support reviews when a deployment experiences issues. Check [Tail log files](/solutions/observability/logs/logs-stream.md).
+* Through **Discover**: This page is a good option for investigating widespread historical patterns. Check [Discover](/explore-analyze/discover.md).

    Discover requires a quick setup in Kibana:

    1. Go to **Stack Management** > **Data Views** (formerly *Index Patterns*).
    - 2. 
Create a [data view](https://www.elastic.co/guide/en/kibana/current/data-views.html) for `elastic-cloud-logs*` and set **Timestamp field** to `@timestamp`: + 2. Create a [data view](/explore-analyze/find-and-organize/data-views.md) for `elastic-cloud-logs*` and set **Timestamp field** to `@timestamp`: :::{image} ../../../images/cloud-ec-ce-monitoring-logs.png :alt: Create data view example in Kibana @@ -104,14 +104,14 @@ You will get this request reported as a new log. Audit logs do not currently rep ## Get notified [ec_get_notified] -You should take advantage of the default [Elastic Stack monitoring alerts](https://www.elastic.co/guide/en/kibana/current/kibana-alerts.html) that are available out-of-the-box. You don’t have to do anything other than enable shipping logs and metrics to have them made available to you (which you did earlier on). +You should take advantage of the default [Elastic Stack monitoring alerts](/deploy-manage/monitor/monitoring-data/kibana-alerts.md) that are available out-of-the-box. You don’t have to do anything other than enable shipping logs and metrics to have them made available to you (which you did earlier on). On top of these default alerts that write to indices you can investigate, you might want to add some custom actions, such as a [connector](https://www.elastic.co/guide/en/kibana/current/action-types.html) for Slack notifications. To set up these notifications, you first configure a Slack connector and then append it to the default alerts and actions. From Kibana: 1. Go to **Stack Management** > **Rules and Connectors** > **Connectors** and create your Slack connector: 1. Select **Slack**. - 2. [Create a Slack Webhook URL](https://www.elastic.co/guide/en/kibana/current/slack-action-type.html#configuring-slack) and paste it into the **Webhook URL** field. + 2. [Create a Slack Webhook URL](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/slack-action-type.md#configuring-slack) and paste it into the **Webhook URL** field. 3. Select **Save**. 2. Go to **Stack Monitoring** and select **Enter setup mode**. @@ -145,7 +145,7 @@ When issues come up that you need to troubleshoot, you’ll frequently start wit You can run this query and many others from the API consoles available via: -* **Kibana** > **Dev Tools**. Check [Run Elasticsearch API requests](https://www.elastic.co/guide/en/kibana/current/console-kibana.html). +* **Kibana** > **Dev Tools**. Check [Run Elasticsearch API requests](/explore-analyze/query-filter/tools/console.md). * **Elastic Cloud** > **Deployment** > **Elasticsearch** > **API Console**. Check [Access the Elasticsearch API console](https://www.elastic.co/guide/en/cloud/current/ec-api-console.html). You can also learn more about the queries you should run for your deployment by reading our blog [Managing and Troubleshooting Elasticsearch Memory](https://www.elastic.co/blog/managing-and-troubleshooting-elasticsearch-memory). diff --git a/raw-migrated-files/cloud/cloud/ec-password-reset.md b/raw-migrated-files/cloud/cloud/ec-password-reset.md index b84cd1b4a4..6977978469 100644 --- a/raw-migrated-files/cloud/cloud/ec-password-reset.md +++ b/raw-migrated-files/cloud/cloud/ec-password-reset.md @@ -13,7 +13,7 @@ Resetting the `elastic` user password does not interfere with Marketplace integr ::::{note} -The `elastic` user should be not be used unless you have no other way to access your deployment. 
[Create API keys for ingesting data](https://www.elastic.co/guide/en/beats/filebeat/current/beats-api-keys.html), and create user accounts with [appropriate roles for user access](../../../deploy-manage/users-roles/cluster-or-deployment-auth/quickstart.md).
+The `elastic` user should not be used unless you have no other way to access your deployment. [Create API keys for ingesting data](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/beats-api-keys.md), and create user accounts with [appropriate roles for user access](../../../deploy-manage/users-roles/cluster-or-deployment-auth/quickstart.md).

::::

diff --git a/raw-migrated-files/cloud/cloud/ec-secure-clusters-kerberos.md b/raw-migrated-files/cloud/cloud/ec-secure-clusters-kerberos.md
index f6bf6d3acf..f451dd4701 100644
--- a/raw-migrated-files/cloud/cloud/ec-secure-clusters-kerberos.md
+++ b/raw-migrated-files/cloud/cloud/ec-secure-clusters-kerberos.md
@@ -5,7 +5,7 @@ You can secure your Elasticsearch clusters and Kibana instances in a deployment

## Before you begin [ec_before_you_begin_13]

-The steps in this section require an understanding of Kerberos. To learn more about Kerberos, check our documentation on [configuring Elasticsearch for Kerberos authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/kerberos-realm.html).
+The steps in this section require an understanding of Kerberos. To learn more about Kerberos, check our documentation on [configuring Elasticsearch for Kerberos authentication](/deploy-manage/users-roles/cluster-or-deployment-auth/kerberos.md).


## Configure the cluster to use Kerberos [ec-configure-kerberos-settings]

diff --git a/raw-migrated-files/cloud/cloud/ec-secure-clusters-oidc.md b/raw-migrated-files/cloud/cloud/ec-secure-clusters-oidc.md
index 4b3ec0a1a9..97be57fbb0 100644
--- a/raw-migrated-files/cloud/cloud/ec-secure-clusters-oidc.md
+++ b/raw-migrated-files/cloud/cloud/ec-secure-clusters-oidc.md
@@ -12,7 +12,7 @@ To prepare for using OpenID Connect for authentication for deployments:

* Create or use an existing deployment. Make note of the Kibana endpoint URL; it will be referenced as `` in the following steps.
* The steps in this section require a moderate understanding of [OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.md#Authentication) in general and the Authorization Code Grant flow specifically. For more information about OpenID Connect and how it works with the Elastic Stack, check:

-    * Our [configuration guide for Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html#oidc-elasticsearch-authentication).
+    * Our [configuration guide for Elasticsearch](/deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md#oidc-elasticsearch-authentication).

diff --git a/raw-migrated-files/cloud/cloud/ec-securing-clusters-JWT.md b/raw-migrated-files/cloud/cloud/ec-securing-clusters-JWT.md
index 5947abe84d..3455d0008c 100644
--- a/raw-migrated-files/cloud/cloud/ec-securing-clusters-JWT.md
+++ b/raw-migrated-files/cloud/cloud/ec-securing-clusters-JWT.md
@@ -97,7 +97,7 @@ xpack:

::::{note}
-Refer to [JWT authentication documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/jwt-auth-realm.html) for more details and examples.
+Refer to [JWT authentication documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/jwt.md) for more details and examples.
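As a quick sanity check that the realm accepts your tokens, a minimal sketch (assuming the realm is configured for shared-secret client authentication; the endpoint, token, and secret below are placeholders):

```sh
curl -H "Authorization: Bearer <jwt>" \
     -H "ES-Client-Authentication: SharedSecret <client-shared-secret>" \
     "https://<elasticsearch-endpoint>/_security/_authenticate"
```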
::::

diff --git a/raw-migrated-files/cloud/cloud/ec-securing-clusters-SAML.md b/raw-migrated-files/cloud/cloud/ec-securing-clusters-SAML.md
index fcdf52da16..f2206d4c64 100644
--- a/raw-migrated-files/cloud/cloud/ec-securing-clusters-SAML.md
+++ b/raw-migrated-files/cloud/cloud/ec-securing-clusters-SAML.md
@@ -33,7 +33,7 @@ You must edit your cluster configuration, sometimes also referred to as the depl

    1. Specifies the authentication realm service.
    2. Defines the SAML realm name. The SAML realm name can only contain alphanumeric characters, underscores, and hyphens.
    3. The order of the SAML realm in your authentication chain. Allowed values are between `2` and `100`. Set to `2` unless you plan on configuring multiple SSO realms for this cluster.
-    4. Defines the SAML attribute that is going to be mapped to the principal (username) of the authenticated user in Kibana. In this non-normative example, `nameid:persistent` maps the `NameID` with the `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` format from the Subject of the SAML Assertion. You can use any SAML attribute that carries the necessary value for your use case in this setting, such as `uid` or `mail`. Refer to [the attribute mapping documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-attributes-mapping) for details and available options.
+    4. Defines the SAML attribute that is going to be mapped to the principal (username) of the authenticated user in Kibana. In this non-normative example, `nameid:persistent` maps the `NameID` with the `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` format from the Subject of the SAML Assertion. You can use any SAML attribute that carries the necessary value for your use case in this setting, such as `uid` or `mail`. Refer to [the attribute mapping documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/saml.md#saml-attributes-mapping) for details and available options.
    5. Defines the SAML attribute used for role mapping when configured in Kibana. Common choices are `groups` or `roles`. The values for both `attributes.principal` and `attributes.groups` depend on the IdP provider, so be sure to review their documentation. Refer to [the attribute mapping documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/saml-guide-stack.html#saml-attributes-mapping) for details and available options.
    6. The file path or the HTTPS URL where your IdP metadata is available, such as `https://idpurl.com/sso/saml/metadata`. If you configure a URL, you need to ensure that your Elasticsearch cluster can access it.
    7. The SAML EntityID of your IdP. This can be read from the configuration page of the IdP, or its SAML metadata, such as `https://idpurl.com/entity_id`.

diff --git a/raw-migrated-files/cloud/cloud/ec-securing-clusters-oidc-op.md b/raw-migrated-files/cloud/cloud/ec-securing-clusters-oidc-op.md
index 9aa62bfe3a..1139042ab9 100644
--- a/raw-migrated-files/cloud/cloud/ec-securing-clusters-oidc-op.md
+++ b/raw-migrated-files/cloud/cloud/ec-securing-clusters-oidc-op.md
@@ -103,7 +103,7 @@ Follow these steps to configure OpenID Connect single sign-on on Elasticsearch S

    The following role mapping for OIDC restricts access to a specific user `(firstname.lastname)` based on the `claim_patterns.principal` email address. This prevents other users on the same domain from having access to your deployment. You can remove the rule or adjust it at your convenience.
- More details are available in our [Configuring role mappings documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/oidc-guide.html#oidc-role-mappings). + More details are available in our [Configuring role mappings documentation](/deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md#oidc-role-mappings). ```json POST /_security/role_mapping/oidc_kibana diff --git a/raw-migrated-files/cloud/cloud/ec-security.md b/raw-migrated-files/cloud/cloud/ec-security.md index 03a285d1d2..fdd9f099e5 100644 --- a/raw-migrated-files/cloud/cloud/ec-security.md +++ b/raw-migrated-files/cloud/cloud/ec-security.md @@ -7,7 +7,7 @@ The security of Elasticsearch Service is described on the [{{ecloud}} security]( * Reset the [`elastic` user password](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md). * Use third-party authentication providers and services like [SAML](../../../deploy-manage/users-roles/cluster-or-deployment-auth/saml.md), [OpenID Connect](../../../deploy-manage/users-roles/cluster-or-deployment-auth/openid-connect.md), or [Kerberos](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kerberos.md) to provide dynamic [role mappings](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md) for role based or attribute based access control. * Use {{kib}} Spaces and roles to [secure access to {{kib}}](../../../deploy-manage/users-roles/cluster-or-deployment-auth/quickstart.md). - * Authorize and authenticate service accounts for {{beats}} by [granting access using API keys](https://www.elastic.co/guide/en/beats/filebeat/current/beats-api-keys.html). + * Authorize and authenticate service accounts for {{beats}} by [granting access using API keys](asciidocalypse://docs/beats/docs/reference/ingestion-tools/beats-filebeat/beats-api-keys.md). * Roles can provide full, or read only, access to your data and can be created in Kibana or directly in Elasticsearch. Check [defining roles](../../../deploy-manage/users-roles/cluster-or-deployment-auth/defining-roles.md) for full details. diff --git a/raw-migrated-files/cloud/cloud/ec-select-subscription-level.md b/raw-migrated-files/cloud/cloud/ec-select-subscription-level.md index 08063fc4f0..32a2591c45 100644 --- a/raw-migrated-files/cloud/cloud/ec-select-subscription-level.md +++ b/raw-migrated-files/cloud/cloud/ec-select-subscription-level.md @@ -35,16 +35,16 @@ This overview shows you: You can [change your subscription level](../../../deploy-manage/cloud-organization/billing/manage-subscription.md) to the recommended level, or stop using the features that belong to a higher level. In the following list, you can find the features we are tracking and the relevant instructions to remove them from your deployments: `Machine learning` -: Edit your deployment to disable [machine learning](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-overview.html). +: Edit your deployment to disable [machine learning](/explore-analyze/machine-learning/anomaly-detection.md). `Searchable snapshots` : Edit your deployment index management policies to disable the frozen tier that is using [searchable snapshots](../../../deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md), or set up your cold tier to not mount indices from a searchable snapshot. 
`JDBC/ODBC clients` -: Make sure that there are no applications that use the SQL [JDBC](https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-jdbc.html) or [ODBC](https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-odbc.html) clients. +: Make sure that there are no applications that use the SQL [JDBC](/explore-analyze/query-filter/languages/sql-jdbc.md) or [ODBC](/explore-analyze/query-filter/languages/sql-odbc.md) clients. `Field-level or document-level security` -: Remove any user role configurations based on field or document access [through the API](https://www.elastic.co/guide/en/elasticsearch/reference/current/field-and-document-access-control.html) or the Kibana Roles page. +: Remove any user role configurations based on field or document access [through the API](/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-access-at-document-field-level.md) or the Kibana Roles page. ::::{note} After you have made your changes to the deployment, it can take up to one hour to clear the notification banner. diff --git a/raw-migrated-files/cloud/cloud/ec-sign-outgoing-saml-message.md b/raw-migrated-files/cloud/cloud/ec-sign-outgoing-saml-message.md index 879271da5b..0a49621eb7 100644 --- a/raw-migrated-files/cloud/cloud/ec-sign-outgoing-saml-message.md +++ b/raw-migrated-files/cloud/cloud/ec-sign-outgoing-saml-message.md @@ -59,6 +59,6 @@ The following optional realm settings are supported: * `force_authn` Specifies whether to set the `ForceAuthn` attribute when requesting that the IdP authenticate the current user. If set to `true`, the IdP is required to verify the user’s identity, irrespective of any existing sessions they might have. Defaults to `false`. * `idp.use_single_logout` Indicates whether to utilise the Identity Provider’s `` (if one exists in the IdP metadata file). Defaults to `true`. -After completing these steps, you can log in to Kibana by authenticating against your SAML IdP. If you encounter any issues with the configuration, refer to the [SAML troubleshooting page](https://www.elastic.co/guide/en/elasticsearch/reference/current/trb-security-saml.html) which contains information about common issues and suggestions for their resolution. +After completing these steps, you can log in to Kibana by authenticating against your SAML IdP. If you encounter any issues with the configuration, refer to the [SAML troubleshooting page](/troubleshoot/elasticsearch/security/trb-security-saml.md) which contains information about common issues and suggestions for their resolution. diff --git a/raw-migrated-files/cloud/cloud/ec-traffic-filtering-through-the-api.md b/raw-migrated-files/cloud/cloud/ec-traffic-filtering-through-the-api.md index 325d0f759e..174063b7e3 100644 --- a/raw-migrated-files/cloud/cloud/ec-traffic-filtering-through-the-api.md +++ b/raw-migrated-files/cloud/cloud/ec-traffic-filtering-through-the-api.md @@ -52,7 +52,7 @@ https://api.elastic-cloud.com/api/v1/deployments/traffic-filter/rulesets \ ``` `region` -: The region is always the same region as the deployment you want to associate with a traffic filter rule set. For details, check the [list of available regions](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html). +: The region is always the same region as the deployment you want to associate with a traffic filter rule set. For details, check the [list of available regions](asciidocalypse://docs/cloud/docs/reference/cloud/cloud-hosted/ec-regions-templates-instances.md). 
`type` : The type of the rule set. In the JSON object, we use `ip` for the ingress IP traffic filter. Currently, we support `ip`, `egress_firewall`, `vpce` (AWS Private Link), `azure_private_endpoint` and `gcp_private_service_connect_endpoint`. These are described in further detail below. diff --git a/raw-migrated-files/docs-content/serverless/action-connectors.md b/raw-migrated-files/docs-content/serverless/action-connectors.md index fa22504a73..16f9cc7667 100644 --- a/raw-migrated-files/docs-content/serverless/action-connectors.md +++ b/raw-migrated-files/docs-content/serverless/action-connectors.md @@ -1,7 +1,7 @@ # {{connectors-app}} [action-connectors] ::::{note} -This page is about Kibana connectors that integrate with services like generative AI model providers. If you’re looking for Search connectors that synchronize third-party data into {{es}}, refer to [Connector clients](https://www.elastic.co/guide/en/serverless/current/elasticsearch-ingest-data-through-integrations-connector-client.html). +This page is about Kibana connectors that integrate with services like generative AI model providers. If you’re looking for Search connectors that synchronize third-party data into {{es}}, refer to [Connector clients](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/search-connectors/index.md). :::: @@ -10,31 +10,31 @@ This content applies to: [![Elasticsearch](../../../images/serverless-es-badge.s The list of available connectors varies by project type. -* [**Amazon Bedrock**^](https://www.elastic.co/guide/en/kibana/current/bedrock-action-type.html): Send a request to Amazon Bedrock. -* [**Cases**^](https://www.elastic.co/guide/en/kibana/current/cases-action-type.html): Add alerts to cases. -* [**CrowdStrike**^](https://www.elastic.co/guide/en/kibana/current/crowdstrike-action-type.html): Send a request to CrowdStrike. -* [**D3 Security**^](https://www.elastic.co/guide/en/kibana/current/d3security-action-type.html): Create an event or trigger playbook workflow actions in D3 SOAR. -* [**Email**^](https://www.elastic.co/guide/en/kibana/current/email-action-type.html): Send email from your server. -* [**Google Gemini**^](https://www.elastic.co/guide/en/kibana/current/gemini-action-type.html): Send a request to Google Gemini. -* [**IBM Resilient**^](https://www.elastic.co/guide/en/kibana/current/resilient-action-type.html): Create an incident in IBM Resilient. -* [**Index**^](https://www.elastic.co/guide/en/kibana/current/index-action-type.html): Index data into {{es}}. -* [**Jira**^](https://www.elastic.co/guide/en/kibana/current/jira-action-type.html): Create an incident in Jira. -* [**Microsoft Teams**^](https://www.elastic.co/guide/en/kibana/current/teams-action-type.html): Send a message to a Microsoft Teams channel. -* [**Observability AI Assistant**^](https://www.elastic.co/guide/en/kibana/current/obs-ai-assistant-action-type.html): Add AI-driven insights and custom actions to your workflow. -* [**OpenAI**^](https://www.elastic.co/guide/en/kibana/current/openai-action-type.html): Send a request to OpenAI. -* [**Opsgenie**^](https://www.elastic.co/guide/en/kibana/current/opsgenie-action-type.html): Create or close an alert in Opsgenie. -* [**PagerDuty**^](https://www.elastic.co/guide/en/kibana/current/pagerduty-action-type.html): Send an event in PagerDuty. -* [**SentinelOne**^](https://www.elastic.co/guide/en/kibana/current/sentinelone-action-type.html): Perform response actions on SentinelOne-protected hosts. 
-* [**ServerLog**^](https://www.elastic.co/guide/en/kibana/current/server-log-action-type.html): Add a message to a Kibana log. -* [**ServiceNow ITOM**^](https://www.elastic.co/guide/en/kibana/current/servicenow-itom-action-type.html): Create an event in ServiceNow ITOM. -* [**ServiceNow ITSM**^](https://www.elastic.co/guide/en/kibana/current/servicenow-action-type.html): Create an incident in ServiceNow ITSM. -* [**ServiceNow SecOps**^](https://www.elastic.co/guide/en/kibana/current/servicenow-sir-action-type.html): Create a security incident in ServiceNow SecOps. -* [**Slack**^](https://www.elastic.co/guide/en/kibana/current/slack-action-type.html): Send messages to Slack channels. -* [**Swimlane**^](https://www.elastic.co/guide/en/kibana/current/swimlane-action-type.html): Create records in Swimlane. -* [**TheHive**^](https://www.elastic.co/guide/en/kibana/current/thehive-action-type.html): Create cases and alerts in TheHive. -* [**Tines**^](https://www.elastic.co/guide/en/kibana/current/tines-action-type.html): Send events to a story. -* [**Torq**^](https://www.elastic.co/guide/en/kibana/current/torq-action-type.html): Trigger a Torq workflow. -* [**Webhook**^](https://www.elastic.co/guide/en/kibana/current/webhook-action-type.html): Send a request to a web service. -* [**Webhook - Case Management**^](https://www.elastic.co/guide/en/kibana/current/cases-webhook-action-type.html): Send a request to a Case Management web service. -* [**xMatters**^](https://www.elastic.co/guide/en/kibana/current/xmatters-action-type.html): Trigger an xMatters workflow. +* [**Amazon Bedrock**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/bedrock-action-type.md): Send a request to Amazon Bedrock. +* [**Cases**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/cases-action-type.md): Add alerts to cases. +* [**CrowdStrike**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/crowdstrike-action-type.md): Send a request to CrowdStrike. +* [**D3 Security**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/d3security-action-type.md): Create an event or trigger playbook workflow actions in D3 SOAR. +* [**Email**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/email-action-type.md): Send email from your server. +* [**Google Gemini**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/gemini-action-type.md): Send a request to Google Gemini. +* [**IBM Resilient**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/resilient-action-type.md): Create an incident in IBM Resilient. +* [**Index**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/index-action-type.md): Index data into {{es}}. +* [**Jira**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/jira-action-type.md): Create an incident in Jira. +* [**Microsoft Teams**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/teams-action-type.md): Send a message to a Microsoft Teams channel. +* [**Observability AI Assistant**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/obs-ai-assistant-action-type.md): Add AI-driven insights and custom actions to your workflow. +* [**OpenAI**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/openai-action-type.md): Send a request to OpenAI. +* [**Opsgenie**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/opsgenie-action-type.md): Create or close an alert in Opsgenie. 
+* [**PagerDuty**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/pagerduty-action-type.md): Send an event in PagerDuty. +* [**SentinelOne**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/sentinelone-action-type.md): Perform response actions on SentinelOne-protected hosts. +* [**ServerLog**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/server-log-action-type.md): Add a message to a Kibana log. +* [**ServiceNow ITOM**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/servicenow-itom-action-type.md): Create an event in ServiceNow ITOM. +* [**ServiceNow ITSM**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/servicenow-action-type.md): Create an incident in ServiceNow ITSM. +* [**ServiceNow SecOps**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/servicenow-sir-action-type.md): Create a security incident in ServiceNow SecOps. +* [**Slack**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/slack-action-type.md): Send messages to Slack channels. +* [**Swimlane**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/swimlane-action-type.md): Create records in Swimlane. +* [**TheHive**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/thehive-action-type.md): Create cases and alerts in TheHive. +* [**Tines**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/tines-action-type.md): Send events to a story. +* [**Torq**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/torq-action-type.md): Trigger a Torq workflow. +* [**Webhook**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/webhook-action-type.md): Send a request to a web service. +* [**Webhook - Case Management**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/cases-webhook-action-type.md): Send a request to a Case Management web service. +* [**xMatters**^](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/xmatters-action-type.md): Trigger an xMatters workflow. diff --git a/raw-migrated-files/docs-content/serverless/ai-assistant-knowledge-base.md b/raw-migrated-files/docs-content/serverless/ai-assistant-knowledge-base.md index ecda0445f1..9d2c0b497a 100644 --- a/raw-migrated-files/docs-content/serverless/ai-assistant-knowledge-base.md +++ b/raw-migrated-files/docs-content/serverless/ai-assistant-knowledge-base.md @@ -117,7 +117,7 @@ Refer to the following video for an example of adding a document to Knowledge Ba Add an index as a knowledge source when you want new information added to that index to automatically inform AI Assistant’s responses. Common security examples include asset inventories, network configuration information, on-call matrices, threat intelligence reports, and vulnerability scans. ::::{important} -Indices added to Knowledge Base must have at least one field mapped as [semantic text](https://www.elastic.co/guide/en/elasticsearch/reference/current/semantic-text.html). +Indices added to Knowledge Base must have at least one field mapped as [semantic text](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/semantic-text.md). 
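For example, a minimal sketch of such a mapping (the index and field names are illustrative):

```json
PUT my-knowledge-index
{
  "mappings": {
    "properties": {
      "content": { "type": "semantic_text" }
    }
  }
}
```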
:::: diff --git a/raw-migrated-files/docs-content/serverless/detections-logsdb-index-mode-impact.md b/raw-migrated-files/docs-content/serverless/detections-logsdb-index-mode-impact.md index 84fc5c7b08..1cc13f0e48 100644 --- a/raw-migrated-files/docs-content/serverless/detections-logsdb-index-mode-impact.md +++ b/raw-migrated-files/docs-content/serverless/detections-logsdb-index-mode-impact.md @@ -2,7 +2,7 @@ Logsdb is enabled by default for {{serverless-full}}. This topic explains the impact of using logsdb index mode with {{sec-serverless}}. -With logsdb index mode, the original `_source` field is not stored in the index but can be reconstructed using [synthetic `_source`](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html#synthetic-source). +With logsdb index mode, the original `_source` field is not stored in the index but can be reconstructed using [synthetic `_source`](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/mapping-source-field.md#synthetic-source). When the `_source` is reconstructed, [modifications](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html#synthetic-source-modifications) are possible. Therefore, there could be a mismatch between users' expectations and how fields are formatted. diff --git a/raw-migrated-files/docs-content/serverless/elasticsearch-clients.md b/raw-migrated-files/docs-content/serverless/elasticsearch-clients.md index 5952341e92..57755956d9 100644 --- a/raw-migrated-files/docs-content/serverless/elasticsearch-clients.md +++ b/raw-migrated-files/docs-content/serverless/elasticsearch-clients.md @@ -2,13 +2,13 @@ You can use the following language clients with {{es-serverless}}: -* [Go](https://www.elastic.co/guide/en/serverless/current/elasticsearch-go-client-getting-started.html) -* [Java](https://www.elastic.co/guide/en/serverless/current/elasticsearch-java-client-getting-started.html) -* [.NET](https://www.elastic.co/guide/en/serverless/current/elasticsearch-dot-net-client-getting-started.html) -* [Node.JS](https://www.elastic.co/guide/en/serverless/current/elasticsearch-nodejs-client-getting-started.html) -* [PHP](https://www.elastic.co/guide/en/serverless/current/elasticsearch-php-client-getting-started.html) -* [Python](https://www.elastic.co/guide/en/serverless/current/elasticsearch-python-client-getting-started.html) -* [Ruby](https://www.elastic.co/guide/en/serverless/current/elasticsearch-ruby-client-getting-started.html) +* [Go](asciidocalypse://docs/go-elasticsearch/docs/reference/elasticsearch/elasticsearch-client-go-api/getting-started-serverless.md) +* [Java](asciidocalypse://docs/elasticsearch-java/docs/reference/elasticsearch/elasticsearch-client-java-api-client/getting-started-serverless.md) +* [.NET](asciidocalypse://docs/elasticsearch-net/docs/reference/elasticsearch/elasticsearch-client-net-api/getting-started.md) +* [Node.JS](asciidocalypse://docs/elasticsearch-js/docs/reference/elasticsearch/elasticsearch-client-javascript-api/getting-started.md) +* [PHP](asciidocalypse://docs/elasticsearch-php/docs/reference/elasticsearch/elasticsearch-client-php-api/getting-started.md) +* [Python](asciidocalypse://docs/elasticsearch-py/docs/reference/elasticsearch/elasticsearch-client-python-api/getting-started.md) +* [Ruby](asciidocalypse://docs/elasticsearch-ruby/docs/reference/elasticsearch/elasticsearch-client-ruby-api/getting-started.md) ::::{tip} Learn how to [connect to your {{es-serverless}} 
endpoint](../../../solutions/search/get-started.md). diff --git a/raw-migrated-files/docs-content/serverless/elasticsearch-differences.md b/raw-migrated-files/docs-content/serverless/elasticsearch-differences.md index 0df47cae96..7409b13bd4 100644 --- a/raw-migrated-files/docs-content/serverless/elasticsearch-differences.md +++ b/raw-migrated-files/docs-content/serverless/elasticsearch-differences.md @@ -36,7 +36,7 @@ To ensure optimal performance, follow these recommendations for sizing individua For large datasets that exceed the recommended maximum size for a single index, consider splitting your data across smaller indices and using an alias to search them collectively. -These recommendations do not apply to indices using better binary quantization (BBQ). Refer to [vector quantization](https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-quantization) in the core {{es}} docs for more information. +These recommendations do not apply to indices using better binary quantization (BBQ). Refer to [vector quantization](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/dense-vector.md#dense-vector-quantization) in the core {{es}} docs for more information. ## API availability [elasticsearch-differences-serverless-apis-availability] @@ -88,7 +88,7 @@ When attempting to use an unavailable API, you’ll receive a clear error messag ## Settings availability [elasticsearch-differences-serverless-settings-availability] -In {{es-serverless}}, you can only configure [index-level settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#index-modules-settings). Cluster-level settings and node-level settings are not required by end users and the `elasticsearch.yml` file is fully managed by Elastic. +In {{es-serverless}}, you can only configure [index-level settings](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/index-settings/index.md#index-modules-settings). Cluster-level settings and node-level settings are not required by end users and the `elasticsearch.yml` file is fully managed by Elastic. Available settings : **Index-level settings**: Settings that control how {{es}} documents are processed, stored, and searched are available to end users. These include: @@ -152,7 +152,7 @@ The following {{es-serverless}} project-specific features are planned for future * Managed Search connectors - You can use [self-managed Search connectors](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-build-connector.html) in the meantime. + You can use [self-managed Search connectors](asciidocalypse://docs/elasticsearch/docs/reference/ingestion-tools/search-connectors/self-managed-connectors.md) in the meantime. 
@@ -161,5 +161,5 @@ The following {{es-serverless}} project-specific features are planned for future The following features are not available in {{es-serverless}} and are not planned for future support: * [Custom plugins and bundles](https://www.elastic.co/guide/en/cloud/current/ec-custom-bundles.html) -* [{{es}} for Apache Hadoop](https://www.elastic.co/guide/en/elasticsearch/hadoop/current/reference.html) -* [Scripted metric aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-scripted-metric-aggregation.html) +* [{{es}} for Apache Hadoop](asciidocalypse://docs/elasticsearch-hadoop/docs/reference/ingestion-tools/elasticsearch-hadoop/elasticsearch-for-apache-hadoop.md) +* [Scripted metric aggregations](asciidocalypse://docs/elasticsearch/docs/reference/data-analysis/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) diff --git a/raw-migrated-files/docs-content/serverless/elasticsearch-ingest-data-through-api.md b/raw-migrated-files/docs-content/serverless/elasticsearch-ingest-data-through-api.md index a690b4b1ff..4ecbb479bf 100644 --- a/raw-migrated-files/docs-content/serverless/elasticsearch-ingest-data-through-api.md +++ b/raw-migrated-files/docs-content/serverless/elasticsearch-ingest-data-through-api.md @@ -1,6 +1,6 @@ # Ingest data through API [elasticsearch-ingest-data-through-api] -The {{es}} APIs enable you to ingest data through code. You can use the APIs of one of the [language clients](../../../solutions/search/site-or-app/clients.md) or the {{es}} HTTP APIs. The examples on this page use the HTTP APIs to demonstrate how ingesting works in {{es}} through APIs. If you want to ingest timestamped data or have a more complex ingestion use case, check out [Beats](https://www.elastic.co/guide/en/serverless/current/elasticsearch-ingest-data-through-beats.html) or [Logstash](https://www.elastic.co/guide/en/serverless/current/elasticsearch-ingest-data-through-logstash.html). +The {{es}} APIs enable you to ingest data through code. You can use the APIs of one of the [language clients](../../../solutions/search/site-or-app/clients.md) or the {{es}} HTTP APIs. The examples on this page use the HTTP APIs to demonstrate how ingesting works in {{es}} through APIs. If you want to ingest timestamped data or have a more complex ingestion use case, check out [Beats](https://www.elastic.co/guide/en/serverless/current/elasticsearch-ingest-data-through-beats.html) or [Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md). ## Using the bulk API [elasticsearch-ingest-data-through-api-using-the-bulk-api] diff --git a/raw-migrated-files/docs-content/serverless/general-sign-up-trial.md b/raw-migrated-files/docs-content/serverless/general-sign-up-trial.md index e5bea6e659..94eea6d0ed 100644 --- a/raw-migrated-files/docs-content/serverless/general-sign-up-trial.md +++ b/raw-migrated-files/docs-content/serverless/general-sign-up-trial.md @@ -1,6 +1,6 @@ # Sign up for Elastic Cloud [general-sign-up-trial] -The following page provides information on how to sign up for an Elastic Cloud Serverless account, for information on how to sign up for hosted deployments, see [Elasticsearch Service - How do i sign up?](https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html). 
+The following page provides information on how to sign up for an Elastic Cloud Serverless account. For information on how to sign up for hosted deployments, see [Elasticsearch Service - How do I sign up?](/deploy-manage/deploy/elastic-cloud/cloud-hosted.md). ## Trial features [general-sign-up-trial-what-is-included-in-my-trial] diff --git a/raw-migrated-files/docs-content/serverless/infrastructure-and-host-monitoring-intro.md b/raw-migrated-files/docs-content/serverless/infrastructure-and-host-monitoring-intro.md index 1dce9c82e5..8a5ae66d5a 100644 --- a/raw-migrated-files/docs-content/serverless/infrastructure-and-host-monitoring-intro.md +++ b/raw-migrated-files/docs-content/serverless/infrastructure-and-host-monitoring-intro.md @@ -11,7 +11,7 @@ Explore the topics in this section to learn how to observe and monitor hosts and | --- | --- | | [Analyze infrastructure and host metrics](../../../solutions/observability/infra-and-hosts/analyze-infrastructure-host-metrics.md) | Visualize infrastructure metrics to help diagnose problematic spikes, identify high resource utilization, automatically discover and track pods, and unify your metrics with other observability data. | | [Troubleshooting](../../../troubleshoot/observability/troubleshooting-infrastructure-monitoring.md) | Troubleshoot common issues on your own or ask for help. | -| [Metrics reference](https://www.elastic.co/guide/en/serverless/current/observability-metrics-reference.html) | Learn about the key metrics displayed in the Infrastructure UI and how they are calculated. | +| [Metrics reference](asciidocalypse://docs/docs-content/docs/reference/data-analysis/observability/metrics-reference-serverless.md) | Learn about the key metrics displayed in the Infrastructure UI and how they are calculated. | diff --git a/raw-migrated-files/docs-content/serverless/intro.md b/raw-migrated-files/docs-content/serverless/intro.md index ba4cc3235a..64f49f4e55 100644 --- a/raw-migrated-files/docs-content/serverless/intro.md +++ b/raw-migrated-files/docs-content/serverless/intro.md @@ -8,9 +8,9 @@ Serverless projects use the core components of the {{stack}}, such as {{es}} and Elastic provides three serverless solutions available on {{ecloud}}: -* **https://www.elastic.co/guide/en/serverless/current/what-is-elasticsearch-serverless.html[{{es-serverless}}]**: Build powerful applications and search experiences using a rich ecosystem of vector search capabilities, APIs, and libraries. -* **https://www.elastic.co/guide/en/serverless/current/what-is-observability-serverless.html[{{obs-serverless}}]**: Monitor your own platforms and services using powerful machine learning and analytics tools with your logs, metrics, traces, and APM data. -* **https://www.elastic.co/guide/en/serverless/current/what-is-security-serverless.html[{{sec-serverless}}]**: Detect, investigate, and respond to threats with SIEM, endpoint protection, and AI-powered analytics capabilities. +* **/solutions/search.md[{{es-serverless}}]**: Build powerful applications and search experiences using a rich ecosystem of vector search capabilities, APIs, and libraries. +* **/solutions/observability.md[{{obs-serverless}}]**: Monitor your own platforms and services using powerful machine learning and analytics tools with your logs, metrics, traces, and APM data. +* **/solutions/security/elastic-security-serverless.md[{{sec-serverless}}]**: Detect, investigate, and respond to threats with SIEM, endpoint protection, and AI-powered analytics capabilities.
[Learn more about {{serverless-full}} in our blog](https://www.elastic.co/blog/elastic-cloud-serverless). @@ -32,7 +32,7 @@ Elastic provides three serverless solutions available on {{ecloud}}: ## Differences between serverless projects and hosted deployments on {{ecloud}} [general-what-is-serverless-elastic-differences-between-serverless-projects-and-hosted-deployments-on-ecloud] -You can run [hosted deployments](https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html) of the {{stack}} on {{ecloud}}. These hosted deployments provide more provisioning and advanced configuration options. +You can run [hosted deployments](/deploy-manage/deploy/elastic-cloud/cloud-hosted.md) of the {{stack}} on {{ecloud}}. These hosted deployments provide more provisioning and advanced configuration options. | | | | | --- | --- | --- | @@ -57,7 +57,7 @@ Migration paths between hosted deployments and serverless projects are currently **How can I move data to or from serverless projects?** -We are working on data migration tools! In the interim, [use Logstash](https://www.elastic.co/guide/en/serverless/current/elasticsearch-ingest-data-through-logstash.html) with Elasticsearch input and output plugins to move data to and from serverless projects. +We are working on data migration tools! In the interim, [use Logstash](asciidocalypse://docs/logstash/docs/reference/ingestion-tools/logstash/index.md) with Elasticsearch input and output plugins to move data to and from serverless projects. **How does serverless ensure compatibility between software versions?** diff --git a/raw-migrated-files/docs-content/serverless/observability-add-logs-service-name.md b/raw-migrated-files/docs-content/serverless/observability-add-logs-service-name.md index be3e2db3df..c8b49bf2ad 100644 --- a/raw-migrated-files/docs-content/serverless/observability-add-logs-service-name.md +++ b/raw-migrated-files/docs-content/serverless/observability-add-logs-service-name.md @@ -10,7 +10,7 @@ To add a service name to your logs, either: ## Use the add fields processor to add a service name [observability-add-logs-service-name-use-the-add-fields-processor-to-add-a-service-name] -For log data without a service name, use the [`add_fields` processor](https://www.elastic.co/guide/en/fleet/current/add_fields-processor.html) to add the `service.name` field. You can add the processor in an integration’s settings or in the {{agent}} or {{filebeat}} configuration. +For log data without a service name, use the [`add_fields` processor](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/add_fields-processor.md) to add the `service.name` field. You can add the processor in an integration’s settings or in the {{agent}} or {{filebeat}} configuration. For example, adding the `add_fields` processor to the inputs section of a standalone {{agent}} or {{filebeat}} configuration would add `your_service_name` as the `service.name` field: @@ -29,12 +29,12 @@ Adding the `add_fields` processor to an integration’s settings would add `your :class: screenshot ::: -For more on defining processors, refer to [define processors](https://www.elastic.co/guide/en/fleet/current/elastic-agent-processor-configuration.html). +For more on defining processors, refer to [define processors](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/agent-processors.md). 
## Map an existing field to the service name field [observability-add-logs-service-name-map-an-existing-field-to-the-service-name-field] -For logs that with an existing field being used to represent the service name, map that field to the `service.name` field using the [alias field type](https://www.elastic.co/guide/en/elasticsearch/reference/current/field-alias.html). Follow these steps to update your mapping: +For logs that use an existing field to represent the service name, map that field to the `service.name` field using the [alias field type](asciidocalypse://docs/elasticsearch/docs/reference/elasticsearch/mapping-reference/field-alias.md). Follow these steps to update your mapping: 1. Go to **Management** → **Index Management** → **Index Templates**. 2. Search for the index template you want to update. diff --git a/raw-migrated-files/docs-content/serverless/observability-ai-assistant.md b/raw-migrated-files/docs-content/serverless/observability-ai-assistant.md index 2c6b403699..38ba7054be 100644 --- a/raw-migrated-files/docs-content/serverless/observability-ai-assistant.md +++ b/raw-migrated-files/docs-content/serverless/observability-ai-assistant.md @@ -12,9 +12,9 @@ The AI Assistant uses generative AI to provide: The AI Assistant integrates with your large language model (LLM) provider through our supported Elastic connectors: -* [OpenAI connector](https://www.elastic.co/guide/en/kibana/current/openai-action-type.html) for OpenAI or Azure OpenAI Service. -* [Amazon Bedrock connector](https://www.elastic.co/guide/en/kibana/current/bedrock-action-type.html) for Amazon Bedrock, specifically for the Claude models. -* [Google Gemini connector](https://www.elastic.co/guide/en/kibana/current/gemini-action-type.html) for Google Gemini. +* [OpenAI connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/openai-action-type.md) for OpenAI or Azure OpenAI Service. +* [Amazon Bedrock connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/bedrock-action-type.md) for Amazon Bedrock, specifically for the Claude models. +* [Google Gemini connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/gemini-action-type.md) for Google Gemini. ::::{important} The AI Assistant is powered by an integration with your large language model (LLM) provider. LLMs are known to sometimes present incorrect information as if it’s correct. Elastic supports configuration and connection to the LLM provider and your knowledge base, but is not responsible for the LLM’s responses. @@ -248,7 +248,7 @@ Clicking a prompt generates a message specific to that log entry. You can contin ### Add the AI Assistant connector to alerting workflows [observability-ai-assistant-add-the-ai-assistant-connector-to-alerting-workflows] -You can use the [Observability AI Assistant connector](https://www.elastic.co/guide/en/kibana/current/obs-ai-assistant-action-type.html) to add AI-generated insights and custom actions to your alerting workflows. To do this: +You can use the [Observability AI Assistant connector](asciidocalypse://docs/kibana/docs/reference/connectors-kibana/obs-ai-assistant-action-type.md) to add AI-generated insights and custom actions to your alerting workflows. To do this: 1. [Create (or edit) an alerting rule](../../../solutions/observability/incident-management/create-manage-rules.md) and specify the conditions that must be met for the alert to fire. 2. Under **Actions**, select the **Observability AI Assistant** connector type.
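To make the alias-field mapping from the observability-add-logs-service-name.md hunk above concrete, here is a short sketch with the {{es}} Python client. The index name and the existing `app_id` field are hypothetical, and the connection details are placeholders:

```python
from elasticsearch import Elasticsearch

# Placeholders: substitute your own endpoint and API key.
client = Elasticsearch("https://my-project.es.example.com:443", api_key="YOUR_API_KEY")

# Map the hypothetical existing field `app_id` to `service.name` using the
# `alias` field type, so queries on either name resolve to the same values.
client.indices.put_mapping(
    index="my-app-logs",
    properties={
        "service": {
            "properties": {
                "name": {"type": "alias", "path": "app_id"}
            }
        }
    },
)
```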
diff --git a/raw-migrated-files/docs-content/serverless/observability-analyze-hosts.md b/raw-migrated-files/docs-content/serverless/observability-analyze-hosts.md index 5d747fc33f..21ddbf6cba 100644 --- a/raw-migrated-files/docs-content/serverless/observability-analyze-hosts.md +++ b/raw-migrated-files/docs-content/serverless/observability-analyze-hosts.md @@ -17,7 +17,7 @@ To access the **Hosts** page, in your {{obs-serverless}} project, go to **Infras :class: screenshot ::: -To learn more about the metrics shown on this page, refer to the [Metrics reference](https://www.elastic.co/guide/en/serverless/current/observability-metrics-reference.html) documentation. +To learn more about the metrics shown on this page, refer to the [Metrics reference](asciidocalypse://docs/docs-content/docs/reference/data-analysis/observability/metrics-reference-serverless.md) documentation. ::::{admonition} Don’t see any metrics? :class: note @@ -221,7 +221,7 @@ The **Logs** tab displays logs relating to the host that you have selected. By d | | | | --- | --- | | **Timestamp** | The timestamp of the log entry from the `timestamp` field. | -| **Message** | The message extracted from the document. The content of this field depends on the type of log message. If no special log message type is detected, the [Elastic Common Schema (ECS)](https://www.elastic.co/guide/en/ecs/current/ecs-base.html) base field, `message`, is used. | +| **Message** | The message extracted from the document. The content of this field depends on the type of log message. If no special log message type is detected, the [Elastic Common Schema (ECS)](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-base.md) base field, `message`, is used. | To view the logs in the {{logs-app}} for a detailed analysis, click **Open in Logs**. @@ -264,7 +264,7 @@ To learn more about roles, refer to [Assign user roles and privileges](../../../ ::::{important} -You must have an active [{{agent}}](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html) with an assigned agent policy that includes the [Osquery Manager](https://docs.elastic.co/en/integrations/osquery_manager.html) integration. +You must have an active [{{agent}}](asciidocalypse://docs/docs-content/docs/reference/ingestion-tools/fleet/install-elastic-agents.md) with an assigned agent policy that includes the [Osquery Manager](https://docs.elastic.co/en/integrations/osquery_manager.html) integration. 
:::: diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-agents-aws-lambda-functions.md b/raw-migrated-files/docs-content/serverless/observability-apm-agents-aws-lambda-functions.md index 690bd95333..eec70d1c24 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-agents-aws-lambda-functions.md +++ b/raw-migrated-files/docs-content/serverless/observability-apm-agents-aws-lambda-functions.md @@ -31,9 +31,9 @@ By using an AWS Lambda extension, Elastic APM agents can send data to a local La To get started with monitoring AWS Lambda functions, refer to the APM agent documentation: -* [Monitor AWS Lambda Node.js functions](https://www.elastic.co/guide/en/apm/agent/nodejs/current/lambda.html) -* [Monitor AWS Lambda Python functions](https://www.elastic.co/guide/en/apm/agent/python/current/lambda-support.html) -* [Monitor AWS Lambda Java functions](https://www.elastic.co/guide/en/apm/agent/java/current/aws-lambda.html) +* [Monitor AWS Lambda Node.js functions](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/lambda.md) +* [Monitor AWS Lambda Python functions](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/lambda-support.md) +* [Monitor AWS Lambda Java functions](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/aws-lambda.md) ::::{important} When sending data to an {{obs-serverless}} project, you *must* use an API key. diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-agents-elastic-apm-agents.md b/raw-migrated-files/docs-content/serverless/observability-apm-agents-elastic-apm-agents.md index 7860d4ff43..77827b01ac 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-agents-elastic-apm-agents.md +++ b/raw-migrated-files/docs-content/serverless/observability-apm-agents-elastic-apm-agents.md @@ -30,12 +30,12 @@ Spans are grouped in transactions—by default, one for each incoming HTTP reque If you’re ready to give Elastic APM a try, see [Get started with traces and APM](../../../solutions/observability/apps/get-started-with-apm.md). -See the [Java agent reference](https://www.elastic.co/guide/en/apm/agent/java/current/intro.html) for full documentation, including: +See the [Java agent reference](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/index.md) for full documentation, including: -* [Supported technologies](https://www.elastic.co/guide/en/apm/agent/java/current/supported-technologies-details.html) -* [Set up](https://www.elastic.co/guide/en/apm/agent/java/current/setup.html) -* [Configuration reference](https://www.elastic.co/guide/en/apm/agent/java/current/configuration.html) -* [API reference](https://www.elastic.co/guide/en/apm/agent/java/current/apis.html) +* [Supported technologies](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/supported-technologies.md) +* [Set up](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/set-up-apm-java-agent.md) +* [Configuration reference](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/configuration.md) +* [API reference](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/tracing-apis.md) ::::{important} Not all APM agent configuration options are compatible with Elastic Cloud serverless. 
@@ -56,12 +56,12 @@ These events, called Transactions and Spans, are sent to Elastic, where they’r If you’re ready to give Elastic APM a try, see [Get started with traces and APM](../../../solutions/observability/apps/get-started-with-apm.md). -See the [Node.js agent reference](https://www.elastic.co/guide/en/apm/agent/nodejs/current/intro.html) for full documentation, including: +See the [Node.js agent reference](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/index.md) for full documentation, including: -* [Supported technologies](https://www.elastic.co/guide/en/apm/agent/nodejs/current/supported-technologies.html) -* [Set up](https://www.elastic.co/guide/en/apm/agent/nodejs/current/set-up.html) -* [Configuration reference](https://www.elastic.co/guide/en/apm/agent/nodejs/current/advanced-setup.html) -* [API reference](https://www.elastic.co/guide/en/apm/agent/nodejs/current/api.html) +* [Supported technologies](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/supported-technologies.md) +* [Set up](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/set-up.md) +* [Configuration reference](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/advanced-setup.md) +* [API reference](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/api.md) ::::{important} Not all APM agent configuration options are compatible with Elastic Cloud serverless. @@ -86,12 +86,12 @@ In addition to APM and error data, the Python agent also collects system and app If you’re ready to give Elastic APM a try, see [Get started with traces and APM](../../../solutions/observability/apps/get-started-with-apm.md). -See the [Python agent reference](https://www.elastic.co/guide/en/apm/agent/python/current/getting-started.html) for full documentation, including: +See the [Python agent reference](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/index.md) for full documentation, including: -* [Supported technologies](https://www.elastic.co/guide/en/apm/agent/python/current/supported-technologies.html) -* [Set up](https://www.elastic.co/guide/en/apm/agent/python/current/set-up.html) -* [Configuration reference](https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html) -* [API reference](https://www.elastic.co/guide/en/apm/agent/python/current/api.html) +* [Supported technologies](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/supported-technologies.md) +* [Set up](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/set-up-apm-python-agent.md) +* [Configuration reference](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/configuration.md) +* [API reference](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/api-reference.md) ::::{important} Not all APM agent configuration options are compatible with Elastic Cloud serverless. @@ -112,12 +112,12 @@ These events, called Transactions and Spans, are sent to Elastic, where they’r If you’re ready to give Elastic APM a try, see [Get started with traces and APM](../../../solutions/observability/apps/get-started-with-apm.md). 
-See the [Ruby agent reference](https://www.elastic.co/guide/en/apm/agent/ruby/current/introduction.html) for full documentation, including: +See the [Ruby agent reference](asciidocalypse://docs/apm-agent-ruby/docs/reference/ingestion-tools/apm-agent-ruby/index.md) for full documentation, including: -* [Supported technologies](https://www.elastic.co/guide/en/apm/agent/ruby/current/supported-technologies.html) -* [Set up](https://www.elastic.co/guide/en/apm/agent/ruby/current/set-up.html) -* [Configuration reference](https://www.elastic.co/guide/en/apm/agent/ruby/current/configuration.html) -* [API reference](https://www.elastic.co/guide/en/apm/agent/ruby/current/api.html) +* [Supported technologies](asciidocalypse://docs/apm-agent-ruby/docs/reference/ingestion-tools/apm-agent-ruby/supported-technologies.md) +* [Set up](asciidocalypse://docs/apm-agent-ruby/docs/reference/ingestion-tools/apm-agent-ruby/set-up-apm-ruby-agent.md) +* [Configuration reference](asciidocalypse://docs/apm-agent-ruby/docs/reference/ingestion-tools/apm-agent-ruby/configuration.md) +* [API reference](asciidocalypse://docs/apm-agent-ruby/docs/reference/ingestion-tools/apm-agent-ruby/api-reference.md) ::::{important} Not all APM agent configuration options are compatible with Elastic Cloud serverless. @@ -144,12 +144,12 @@ In addition to capturing events like those mentioned here, the agent also collec If you’re ready to give Elastic APM a try, see [Get started with traces and APM](../../../solutions/observability/apps/get-started-with-apm.md). -See the [Go agent reference](https://www.elastic.co/guide/en/apm/agent/go/current/introduction.html) for full documentation, including: +See the [Go agent reference](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/index.md) for full documentation, including: -* [Supported technologies](https://www.elastic.co/guide/en/apm/agent/go/current/supported-tech.html) -* [Set up](https://www.elastic.co/guide/en/apm/agent/go/current/getting-started.html) -* [Configuration reference](https://www.elastic.co/guide/en/apm/agent/go/current/configuration.html) -* [API reference](https://www.elastic.co/guide/en/apm/agent/go/current/api.html) +* [Supported technologies](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/supported-technologies.md) +* [Set up](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/set-up-apm-go-agent.md) +* [Configuration reference](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/configuration.md) +* [API reference](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/api-documentation.md) ::::{important} Not all APM agent configuration options are compatible with Elastic Cloud serverless. @@ -168,12 +168,12 @@ The Agent automatically registers callback methods for built-in Diagnostic Sourc If you’re ready to give Elastic APM a try, see [Get started with traces and APM](../../../solutions/observability/apps/get-started-with-apm.md). 
-See the [.NET agent reference](https://www.elastic.co/guide/en/apm/agent/dotnet/current/intro.html) for full documentation, including: +See the [.NET agent reference](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/index.md) for full documentation, including: -* [Supported technologies](https://www.elastic.co/guide/en/apm/agent/dotnet/current/supported-technologies.html) -* [Set up](https://www.elastic.co/guide/en/apm/agent/dotnet/current/setup.html) -* [Configuration reference](https://www.elastic.co/guide/en/apm/agent/dotnet/current/configuration.html) -* [API reference](https://www.elastic.co/guide/en/apm/agent/dotnet/current/public-api.html) +* [Supported technologies](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/supported-technologies.md) +* [Set up](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/set-up-apm-net-agent.md) +* [Configuration reference](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/configuration.md) +* [API reference](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/public-api.md) ::::{important} Not all APM agent configuration options are compatible with Elastic Cloud serverless. @@ -190,12 +190,12 @@ The Elastic APM PHP agent measures application performance and tracks errors. Th If you’re ready to give Elastic APM a try, see [Get started with traces and APM](../../../solutions/observability/apps/get-started-with-apm.md). -See the [PHP agent reference](https://www.elastic.co/guide/en/apm/agent/php/current/intro.html) for full documentation, including: +See the [PHP agent reference](asciidocalypse://docs/apm-agent-php/docs/reference/ingestion-tools/apm-agent-php/index.md) for full documentation, including: -* [Supported technologies](https://www.elastic.co/guide/en/apm/agent/php/current/supported-technologies.html) -* [Set up](https://www.elastic.co/guide/en/apm/agent/php/current/setup.html) -* [Configuration reference](https://www.elastic.co/guide/en/apm/agent/php/current/configuration.html) -* [API reference](https://www.elastic.co/guide/en/apm/agent/php/current/public-api.html) +* [Supported technologies](asciidocalypse://docs/apm-agent-php/docs/reference/ingestion-tools/apm-agent-php/supported-technologies.md) +* [Set up](asciidocalypse://docs/apm-agent-php/docs/reference/ingestion-tools/apm-agent-php/set-up-apm-php-agent.md) +* [Configuration reference](asciidocalypse://docs/apm-agent-php/docs/reference/ingestion-tools/apm-agent-php/configuration.md) +* [API reference](asciidocalypse://docs/apm-agent-php/docs/reference/ingestion-tools/apm-agent-php/public-api.md) ::::{important} Not all APM agent configuration options are compatible with Elastic Cloud serverless. 
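Since the file above walks through each agent's reference links without a configuration sample, here is a hedged sketch for the Python agent section: a minimal Flask setup in which the service name, server URL, and API key are placeholder assumptions. As the admonitions in this file note, serverless projects require an API key:

```python
from flask import Flask
from elasticapm.contrib.flask import ElasticAPM

app = Flask(__name__)

# All values are placeholders; supply your own project's APM endpoint.
app.config["ELASTIC_APM"] = {
    "SERVICE_NAME": "my-service",
    "SERVER_URL": "https://my-project.apm.us-east-1.aws.elastic.cloud:443",
    "API_KEY": "YOUR_API_KEY",
    "ENVIRONMENT": "production",
}

# Instruments request handling so transactions and errors are reported.
apm = ElasticAPM(app)

@app.route("/")
def index():
    return "traced"
```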
diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-agents-opentelemetry.md b/raw-migrated-files/docs-content/serverless/observability-apm-agents-opentelemetry.md index 48ffd54308..69d392c068 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-agents-opentelemetry.md +++ b/raw-migrated-files/docs-content/serverless/observability-apm-agents-opentelemetry.md @@ -70,10 +70,10 @@ However, not all features of the OpenTelemetry API are supported when using this Find more details about how to use an OpenTelemetry API or SDK with an Elastic APM agent and which OpenTelemetry API features are supported in the APM agent documentation: -* [**APM Java agent →**](https://www.elastic.co/guide/en/apm/agent/java/current/opentelemetry-bridge.html) -* [**APM .NET agent →**](https://www.elastic.co/guide/en/apm/agent/dotnet/current/opentelemetry-bridge.html) -* [**APM Node.js agent →**](https://www.elastic.co/guide/en/apm/agent/nodejs/current/opentelemetry-bridge.html) -* [**APM Python agent →**](https://www.elastic.co/guide/en/apm/agent/python/current/opentelemetry-bridge.html) +* [**APM Java agent →**](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/opentelemetry-bridge.md) +* [**APM .NET agent →**](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/opentelemetry-bridge.md) +* [**APM Node.js agent →**](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/opentelemetry-bridge.md) +* [**APM Python agent →**](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/opentelemetry-api-bridge.md) ## Upstream OpenTelemetry Collector and language SDKs [observability-apm-agents-opentelemetry-upstream-opentelemetry-collector-and-language-sdks] diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-compress-spans.md b/raw-migrated-files/docs-content/serverless/observability-apm-compress-spans.md index cc5dd23a8c..be823b3049 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-compress-spans.md +++ b/raw-migrated-files/docs-content/serverless/observability-apm-compress-spans.md @@ -52,8 +52,8 @@ Support for span compression is available in the following agents and can be con | Agent | Same-kind config | Exact-match config | | --- | --- | --- | -| **Go agent** | [`ELASTIC_APM_SPAN_COMPRESSION_SAME_KIND_MAX_DURATION`](https://www.elastic.co/guide/en/apm/agent/go/current/configuration.html#config-span-compression-exact-match-duration) | -| **Java agent** | [`span_compression_same_kind_max_duration`](https://www.elastic.co/guide/en/apm/agent/java/current/config-huge-traces.html#config-span-compression-same-kind-max-duration) | [`span_compression_exact_match_max_duration`](https://www.elastic.co/guide/en/apm/agent/java/current/config-huge-traces.html#config-span-compression-exact-match-max-duration) | -| **.NET agent** | [`SpanCompressionSameKindMaxDuration`](https://www.elastic.co/guide/en/apm/agent/dotnet/current/config-core.html#config-span-compression-exact-match-max-duration) | -| **Node.js agent** | [`spanCompressionSameKindMaxDuration`](https://www.elastic.co/guide/en/apm/agent/nodejs/current/configuration.html#span-compression-exact-match-max-duration) | -| **Python agent** | [`span_compression_same_kind_max_duration`](https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html#config-span-compression-exact-match-max_duration) | +| **Go agent** | 
[`ELASTIC_APM_SPAN_COMPRESSION_SAME_KIND_MAX_DURATION`](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/configuration.md#config-span-compression-exact-match-duration) | +| **Java agent** | [`span_compression_same_kind_max_duration`](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/config-huge-traces.md#config-span-compression-same-kind-max-duration) | [`span_compression_exact_match_max_duration`](https://www.elastic.co/guide/en/apm/agent/java/current/config-huge-traces.html#config-span-compression-exact-match-max-duration) | +| **.NET agent** | [`SpanCompressionSameKindMaxDuration`](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/config-core.md#config-span-compression-exact-match-max-duration) | +| **Node.js agent** | [`spanCompressionSameKindMaxDuration`](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/configuration.md#span-compression-exact-match-max-duration) | +| **Python agent** | [`span_compression_same_kind_max_duration`](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/configuration.md#config-span-compression-exact-match-max_duration) | diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-distributed-tracing.md b/raw-migrated-files/docs-content/serverless/observability-apm-distributed-tracing.md index 6fb967561f..f329cac2b6 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-distributed-tracing.md +++ b/raw-migrated-files/docs-content/serverless/observability-apm-distributed-tracing.md @@ -86,7 +86,7 @@ Sending services must add the `traceparent` header to outgoing requests. :::::::{tab-set} ::::::{tab-item} Go -1. Start a transaction with [`StartTransaction`](https://www.elastic.co/guide/en/apm/agent/go/current/api.html#tracer-api-start-transaction) or a span with [`StartSpan`](https://www.elastic.co/guide/en/apm/agent/go/current/api.html#transaction-start-span). +1. Start a transaction with [`StartTransaction`](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/api-documentation.md#tracer-api-start-transaction) or a span with [`StartSpan`](https://www.elastic.co/guide/en/apm/agent/go/current/api.html#transaction-start-span). 2. Get the active `TraceContext`. 3. Send the `TraceContext` to the receiving service. @@ -107,7 +107,7 @@ tracestate := traceContext.State.String() :::::: ::::::{tab-item} Java -1. Start a transaction with [`startTransaction`](https://www.elastic.co/guide/en/apm/agent/java/current/public-api.html#api-start-transaction), or a span with [`startSpan`](https://www.elastic.co/guide/en/apm/agent/java/current/public-api.html#api-span-start-span). +1. Start a transaction with [`startTransaction`](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/public-api.md#api-start-transaction), or a span with [`startSpan`](https://www.elastic.co/guide/en/apm/agent/java/current/public-api.html#api-span-start-span). 2. Inject the `traceparent` header into the request object with [`injectTraceHeaders`](https://www.elastic.co/guide/en/apm/agent/java/current/public-api.html#api-transaction-inject-trace-headers) Example of manually instrumenting an RPC framework: @@ -136,7 +136,7 @@ public Response onOutgoingRequest(Request request) throws Exception { :::::: ::::::{tab-item} .NET -1. 
Serialize the distributed tracing context of the active transaction or span with [`CurrentTransaction`](https://www.elastic.co/guide/en/apm/agent/dotnet/current/public-api.html#api-current-transaction) or [`CurrentSpan`](https://www.elastic.co/guide/en/apm/agent/dotnet/current/public-api.html#api-current-span). +1. Serialize the distributed tracing context of the active transaction or span with [`CurrentTransaction`](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/public-api.md#api-current-transaction) or [`CurrentSpan`](https://www.elastic.co/guide/en/apm/agent/dotnet/current/public-api.html#api-current-span). 2. Send the serialized context the receiving service. Example: @@ -150,7 +150,7 @@ string outgoingDistributedTracingData = :::::: ::::::{tab-item} Node.js -1. Start a transaction with [`apm.startTransaction()`](https://www.elastic.co/guide/en/apm/agent/nodejs/current/agent-api.html#apm-start-transaction), or a span with [`apm.startSpan()`](https://www.elastic.co/guide/en/apm/agent/nodejs/current/agent-api.html#apm-start-span). +1. Start a transaction with [`apm.startTransaction()`](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/agent-api.md#apm-start-transaction), or a span with [`apm.startSpan()`](https://www.elastic.co/guide/en/apm/agent/nodejs/current/agent-api.html#apm-start-span). 2. Get the serialized `traceparent` string of the started transaction/span with [`currentTraceparent`](https://www.elastic.co/guide/en/apm/agent/nodejs/current/agent-api.html#apm-current-traceparent). 3. Encode the `traceparent` and send it to the receiving service inside your regular request. @@ -181,7 +181,7 @@ $distDataAsString = ElasticApm::getSerializedCurrentDistributedTracingData(); :::::: ::::::{tab-item} Python -1. Start a transaction with [`begin_transaction()`](https://www.elastic.co/guide/en/apm/agent/python/current/api.html#client-api-begin-transaction). +1. Start a transaction with [`begin_transaction()`](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/api-reference.md#client-api-begin-transaction). 2. Get the `trace_parent` of the active transaction. 3. Send the `trace_parent` to the receiving service. @@ -200,7 +200,7 @@ elasticapm.get_trace_parent_header('new-transaction') <2> :::::: ::::::{tab-item} Ruby -1. Start a span with [`with_span`](https://www.elastic.co/guide/en/apm/agent/ruby/current/api.html#api-agent-with_span). +1. Start a span with [`with_span`](asciidocalypse://docs/apm-agent-ruby/docs/reference/ingestion-tools/apm-agent-ruby/api-reference.md#api-agent-with_span). 2. Get the active `TraceContext`. 3. Send the `TraceContext` to the receiving service. @@ -308,7 +308,7 @@ agent.startTransaction('my-service-b-transaction', { childOf: traceparent }) < ::::::{tab-item} PHP 1. Receive the distributed tracing data on the server side. -2. Begin a new transaction using the agent’s public API. For example, use [`ElasticApm::beginCurrentTransaction`](https://www.elastic.co/guide/en/apm/agent/php/current/public-api.html#api-elasticapm-class-begin-current-transaction) and pass the received distributed tracing data (serialized as string) as a parameter. This will create a new transaction as a child of the incoming trace context. +2. Begin a new transaction using the agent’s public API. 
For example, use [`ElasticApm::beginCurrentTransaction`](asciidocalypse://docs/apm-agent-php/docs/reference/ingestion-tools/apm-agent-php/public-api.md#api-elasticapm-class-begin-current-transaction) and pass the received distributed tracing data (serialized as string) as a parameter. This will create a new transaction as a child of the incoming trace context. 3. Don’t forget to eventually end the transaction on the server side. Example: diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-filter-your-data.md b/raw-migrated-files/docs-content/serverless/observability-apm-filter-your-data.md index 45021f82eb..dab5c4ec15 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-filter-your-data.md +++ b/raw-migrated-files/docs-content/serverless/observability-apm-filter-your-data.md @@ -30,10 +30,10 @@ The environment selector is a global filter for `service.environment`. It allows Service environments are defined when configuring your APM agents. It’s vital to be consistent when naming environments in your APM agents. To learn how to configure service environments, see the specific APM agent documentation: -* **Go:** [`ELASTIC_APM_ENVIRONMENT`](https://www.elastic.co/guide/en/apm/agent/go/current/configuration.html#config-environment) -* **Java:** [`environment`](https://www.elastic.co/guide/en/apm/agent/java/current/config-core.html#config-environment) -* **.NET:** [`Environment`](https://www.elastic.co/guide/en/apm/agent/dotnet/current/config-core.html#config-environment) -* **Node.js:** [`environment`](https://www.elastic.co/guide/en/apm/agent/nodejs/current/configuration.html#environment) -* **PHP:** [`environment`](https://www.elastic.co/guide/en/apm/agent/php/current/configuration-reference.html#config-environment) -* **Python:** [`environment`](https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html#config-environment) -* **Ruby:** [`environment`](https://www.elastic.co/guide/en/apm/agent/ruby/current/configuration.html#config-environment) +* **Go:** [`ELASTIC_APM_ENVIRONMENT`](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/configuration.md#config-environment) +* **Java:** [`environment`](asciidocalypse://docs/apm-agent-java/docs/reference/ingestion-tools/apm-agent-java/config-core.md#config-environment) +* **.NET:** [`Environment`](asciidocalypse://docs/apm-agent-dotnet/docs/reference/ingestion-tools/apm-agent-dotnet/config-core.md#config-environment) +* **Node.js:** [`environment`](asciidocalypse://docs/apm-agent-nodejs/docs/reference/ingestion-tools/apm-agent-nodejs/configuration.md#environment) +* **PHP:** [`environment`](asciidocalypse://docs/apm-agent-php/docs/reference/ingestion-tools/apm-agent-php/configuration-reference.md#config-environment) +* **Python:** [`environment`](asciidocalypse://docs/apm-agent-python/docs/reference/ingestion-tools/apm-agent-python/configuration.md#config-environment) +* **Ruby:** [`environment`](asciidocalypse://docs/apm-agent-ruby/docs/reference/ingestion-tools/apm-agent-ruby/configuration.md#config-environment) diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-find-transaction-latency-and-failure-correlations.md b/raw-migrated-files/docs-content/serverless/observability-apm-find-transaction-latency-and-failure-correlations.md index 9e97431632..10509bf4ba 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-find-transaction-latency-and-failure-correlations.md +++ 
b/raw-migrated-files/docs-content/serverless/observability-apm-find-transaction-latency-and-failure-correlations.md @@ -41,7 +41,7 @@ In this example screenshot, there are transactions that are skewed to the right ## Find failed transaction correlations [correlations-error-rate] -The correlations on the **Failed transaction correlations** tab help you discover which attributes are most influential in distinguishing between transaction failures and successes. In this context, the success or failure of a transaction is determined by its [event.outcome](https://www.elastic.co/guide/en/ecs/current/ecs-event.html#field-event-outcome) value. For example, APM agents set the `event.outcome` to `failure` when an HTTP transaction returns a `5xx` status code. +The correlations on the **Failed transaction correlations** tab help you discover which attributes are most influential in distinguishing between transaction failures and successes. In this context, the success or failure of a transaction is determined by its [event.outcome](asciidocalypse://docs/ecs/docs/reference/ecs/ecs-event.md#field-event-outcome) value. For example, APM agents set the `event.outcome` to `failure` when an HTTP transaction returns a `5xx` status code. The chart highlights the failed transactions in the overall latency distribution for the transaction group. If there are attributes that have a statistically significant correlation with failed transactions, they are listed in a table. The table is sorted by scores, which are mapped to high, medium, or low impact levels. Attributes with high impact levels are more likely to contribute to failed transactions. By default, the attribute with the highest score is added to the chart. To see a different attribute in the chart, select its row in the table. diff --git a/raw-migrated-files/docs-content/serverless/observability-apm-get-started.md b/raw-migrated-files/docs-content/serverless/observability-apm-get-started.md index 6c7c4e39b0..0319ad9136 100644 --- a/raw-migrated-files/docs-content/serverless/observability-apm-get-started.md +++ b/raw-migrated-files/docs-content/serverless/observability-apm-get-started.md @@ -81,14 +81,14 @@ To send APM data to Elastic, you must install an APM agent and configure it to s Instrumentation is the process of extending your application’s code to report trace data to Elastic APM. Go applications must be instrumented manually at the source code level. To instrument your applications, use one of the following approaches: - * [Built-in instrumentation modules](https://www.elastic.co/guide/en/apm/agent/go/current/builtin-modules.html). - * [Custom instrumentation](https://www.elastic.co/guide/en/apm/agent/go/current/custom-instrumentation.html) and context propagation with the Go Agent API. + * [Built-in instrumentation modules](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/builtin-modules.md). + * [Custom instrumentation](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/custom-instrumentation.md) and context propagation with the Go Agent API. 
**Learn more in the {{apm-agent}} reference** - * [Supported technologies](https://www.elastic.co/guide/en/apm/agent/go/current/supported-tech.html) - * [Advanced configuration](https://www.elastic.co/guide/en/apm/agent/go/current/configuration.html) - * [Detailed guide to instrumenting Go source code](https://www.elastic.co/guide/en/apm/agent/go/current/getting-started.html) + * [Supported technologies](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/supported-technologies.md) + * [Advanced configuration](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/configuration.md) + * [Detailed guide to instrumenting Go source code](asciidocalypse://docs/apm-agent-go/docs/reference/ingestion-tools/apm-agent-go/set-up-apm-go-agent.md)
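As a closing illustration of the distributed-tracing steps shown in the Python tab further above, the sketch below propagates a `traceparent` between two services manually. The service names and the transport between them are assumptions, and it relies on the Python agent helpers `get_trace_parent_header` (shown in the tab-set above) and `trace_parent_from_string` from the agent's public API:

```python
import elasticapm
from elasticapm import Client

# Sending side (service A): start a transaction and capture its traceparent.
client_a = Client(service_name="service-a")  # placeholder name
client_a.begin_transaction("request")
traceparent = elasticapm.get_trace_parent_header()
# ...send `traceparent` to service B, for example as an HTTP header...
client_a.end_transaction("send-work", "success")

# Receiving side (service B): continue the same trace as a child transaction.
client_b = Client(service_name="service-b")  # placeholder name
parent = elasticapm.trace_parent_from_string(traceparent)
client_b.begin_transaction("request", trace_parent=parent)
client_b.end_transaction("handle-work", "success")
```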